Skip to content

Commit 79174ef

Browse files
committed
Update on "vmap: fixed to work with functools.partial"
There was a bug where we always tried to grab the `__name__` attribute of the function passed in by the user. Not all Callables have the `__name__` attribute, an example being a Callable produced by functools.partial. This PR modifies the error-checking code to use `repr` if `__name__` is not available. Furthermore, it moves the "get the name of this function" functionality to the actual error sites as an optimization so we don't spend time trying to compute `__repr__` for the Callable if there is no error. Test Plan: - `pytest test/test_vmap.py -v`, added new tests. Differential Revision: [D23130235](https://our.internmc.facebook.com/intern/diff/D23130235) [ghstack-poisoned]
2 parents c79b17f + 248b6a3 commit 79174ef

255 files changed

Lines changed: 9721 additions & 3243 deletions

File tree

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

.circleci/cimodel/data/pytorch_build_data.py

Lines changed: 15 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -21,7 +21,9 @@
2121
]),
2222
("clang", [
2323
("5", [
24-
XImportant("3.6"), # This is actually the ASAN build
24+
("3.6", [
25+
("asan", [XImportant(True)]),
26+
]),
2527
]),
2628
]),
2729
("cuda", [
@@ -126,6 +128,7 @@ def child_constructor(self):
126128
experimental_feature = self.find_prop("experimental_feature")
127129

128130
next_nodes = {
131+
"asan": AsanConfigNode,
129132
"xla": XlaConfigNode,
130133
"parallel_tbb": ParallelTBBConfigNode,
131134
"parallel_native": ParallelNativeConfigNode,
@@ -148,6 +151,17 @@ def child_constructor(self):
148151
return ImportantConfigNode
149152

150153

154+
class AsanConfigNode(TreeConfigNode):
155+
def modify_label(self, label):
156+
return "Asan=" + str(label)
157+
158+
def init2(self, node_name):
159+
self.props["is_asan"] = node_name
160+
161+
def child_constructor(self):
162+
return ImportantConfigNode
163+
164+
151165
class ParallelTBBConfigNode(TreeConfigNode):
152166
def modify_label(self, label):
153167
return "PARALLELTBB=" + str(label)

.circleci/cimodel/data/pytorch_build_definitions.py

Lines changed: 7 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -181,7 +181,7 @@ def gen_dependent_configs(xenial_parent_config):
181181
c = Conf(
182182
xenial_parent_config.distro,
183183
["py3"] + parms,
184-
pyver="3.6",
184+
pyver=xenial_parent_config.pyver,
185185
cuda_version=xenial_parent_config.cuda_version,
186186
restrict_phases=["test"],
187187
gpu_resource=gpu,
@@ -257,6 +257,7 @@ def instantiate_configs():
257257
compiler_name = fc.find_prop("compiler_name")
258258
compiler_version = fc.find_prop("compiler_version")
259259
is_xla = fc.find_prop("is_xla") or False
260+
is_asan = fc.find_prop("is_asan") or False
260261
parms_list_ignored_for_docker_image = []
261262

262263
vulkan = fc.find_prop("vulkan") or False
@@ -292,12 +293,11 @@ def instantiate_configs():
292293
gcc_version = compiler_name + (fc.find_prop("compiler_version") or "")
293294
parms_list.append(gcc_version)
294295

295-
# TODO: This is a nasty special case
296-
if gcc_version == "clang5" and not is_xla:
297-
parms_list.append("asan")
298-
python_version = fc.find_prop("pyver")
299-
parms_list[0] = fc.find_prop("abbreviated_pyver")
300-
restrict_phases = ["build", "test1", "test2"]
296+
if is_asan:
297+
parms_list.append("asan")
298+
python_version = fc.find_prop("pyver")
299+
parms_list[0] = fc.find_prop("abbreviated_pyver")
300+
restrict_phases = ["build", "test1", "test2"]
301301

302302
if cuda_version:
303303
cuda_gcc_version = fc.find_prop("cuda_gcc_override") or "gcc7"

.gitignore

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -49,12 +49,10 @@ test/test-reports/
4949
third_party/build/
5050
tools/shared/_utils_internal.py
5151
torch.egg-info/
52-
torch/__init__.pyi
5352
torch/_C/__init__.pyi
5453
torch/_C/_nn.pyi
5554
torch/_C/_VariableFunctions.pyi
5655
torch/nn/functional.pyi
57-
torch/nn/modules/*.pyi
5856
torch/csrc/autograd/generated/*
5957
# Listed manually because some files in this directory are not generated
6058
torch/testing/_internal/generated/annotated_fn_args.py

.jenkins/caffe2/test.sh

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -188,7 +188,7 @@ if [[ "$BUILD_ENVIRONMENT" == *onnx* ]]; then
188188
# default pip version is too old(9.0.2), unable to support tag `manylinux2010`.
189189
# Fix the pip error: Couldn't find a version that satisfies the requirement
190190
sudo pip install --upgrade pip
191-
pip install -q --user -i https://test.pypi.org/simple/ ort-nightly==1.4.0.dev202007311
191+
pip install -q --user -i https://test.pypi.org/simple/ ort-nightly==1.4.0.dev202008122
192192
fi
193193
"$ROOT_DIR/scripts/onnx/test.sh"
194194
fi

CMakeLists.txt

Lines changed: 13 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -138,7 +138,7 @@ option(BUILD_JNI "Build JNI bindings" OFF)
138138
cmake_dependent_option(
139139
INSTALL_TEST "Install test binaries if BUILD_TEST is on" ON
140140
"BUILD_TEST" OFF)
141-
option(CLANG_CODE_COVERAGE "Compile C/C++ with clang code coverage flags" OFF)
141+
option(CODE_COVERAGE "Compile C/C++ with code coverage flags" OFF)
142142
option(COLORIZE_OUTPUT "Colorize output during compilation" ON)
143143
option(USE_ASAN "Use Address Sanitizer" OFF)
144144
option(USE_TSAN "Use Thread Sanitizer" OFF)
@@ -596,10 +596,18 @@ if(USE_ASAN)
596596
string(APPEND CMAKE_LINKER_FLAGS_DEBUG " -fsanitize=address")
597597
endif()
598598

599-
# invoke clang code coverage flags
600-
if(CLANG_CODE_COVERAGE)
601-
string(APPEND CMAKE_C_FLAGS " -fprofile-instr-generate -fcoverage-mapping")
602-
string(APPEND CMAKE_CXX_FLAGS " -fprofile-instr-generate -fcoverage-mapping")
599+
# Add code coverage flags to supported compilers
600+
if(CODE_COVERAGE)
601+
if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU")
602+
string(APPEND CMAKE_C_FLAGS " --coverage -fprofile-abs-path")
603+
string(APPEND CMAKE_CXX_FLAGS " --coverage -fprofile-abs-path")
604+
elseif("${CMAKE_CXX_COMPILER_ID}" MATCHES "Clang")
605+
string(APPEND CMAKE_C_FLAGS " -fprofile-instr-generate -fcoverage-mapping")
606+
string(APPEND CMAKE_CXX_FLAGS " -fprofile-instr-generate -fcoverage-mapping")
607+
else()
608+
message(ERROR "Code coverage for compiler ${CMAKE_CXX_COMPILER_ID} is unsupported")
609+
endif()
610+
603611
endif()
604612

605613
if(APPLE)

CONTRIBUTING.md

Lines changed: 23 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,7 @@
22

33
- [Contributing to PyTorch](#contributing-to-pytorch)
44
- [Developing PyTorch](#developing-pytorch)
5+
- [Nightly Checkout](#nightly-checkout)
56
- [Codebase structure](#codebase-structure)
67
- [Unit testing](#unit-testing)
78
- [Better local unit tests with pytest](#better-local-unit-tests-with-pytest)
@@ -121,6 +122,28 @@ You do not need to repeatedly install after modifying Python files.
121122
In case you want to reinstall, make sure that you uninstall PyTorch first by running `pip uninstall torch`
122123
and `python setup.py clean`. Then you can install in `develop` mode again.
123124

125+
## Nightly Checkout
126+
127+
The `tools/nightly_checkout.py` script is provided to ease pure Python development of
128+
PyTorch. This uses conda and git to check out the nightly development version of PyTorch
129+
and installs pre-built binaries into the current repository. This is like a development
130+
or editable install, but without needing the ability to compile any C++ code.
131+
132+
You can use this script to check out a new nightly branch with the following::
133+
134+
```sh
135+
$ ./tools/nightly_checkout.py -b my-nightly-branch
136+
$ conda activate pytorch-deps
137+
```
138+
139+
Or if you would like to re-use an existing conda environment, you can pass in
140+
the regular environment parameters (--name or --prefix)::
141+
142+
```sh
143+
$ ./tools/nightly_checkout.py -b my-nightly-branch -n my-env
144+
$ conda activate my-env
145+
```
146+
124147
## Codebase structure
125148

126149
* [c10](c10) - Core library files that work everywhere, both server

aten/src/ATen/core/Formatting.cpp

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -281,7 +281,8 @@ std::ostream& print(std::ostream& stream, const Tensor & tensor_, int64_t linesi
281281
if (tensor_.qscheme() == c10::kPerTensorAffine) {
282282
stream << ", scale: " << tensor_.q_scale();
283283
stream << ", zero_point: " << tensor_.q_zero_point();
284-
} else if (tensor_.qscheme() == c10::kPerChannelAffine) {
284+
} else if (tensor_.qscheme() == c10::kPerChannelAffine ||
285+
tensor_.qscheme() == c10::kPerChannelAffineFloatQParams) {
285286
stream << ", scales: ";
286287
Tensor scales = tensor_.q_per_channel_scales();
287288
print(stream, scales, linesize);

aten/src/ATen/core/NamedRegistrations.cpp

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -190,6 +190,9 @@ TORCH_LIBRARY_IMPL(aten, Named, m) {
190190
m.impl("gt.Scalar_out", CppFunction::makeFallthrough());
191191
m.impl("gt.Tensor", CppFunction::makeFallthrough());
192192
m.impl("gt.Tensor_out", CppFunction::makeFallthrough());
193+
m.impl("hypot", CppFunction::makeFallthrough());
194+
m.impl("hypot.out", CppFunction::makeFallthrough());
195+
m.impl("hypot_", CppFunction::makeFallthrough());
193196
m.impl("imag", CppFunction::makeFallthrough());
194197
m.impl("index_fill.Dimname_Scalar", CppFunction::makeFallthrough());
195198
m.impl("index_fill.Dimname_Tensor", CppFunction::makeFallthrough());
@@ -318,6 +321,9 @@ TORCH_LIBRARY_IMPL(aten, Named, m) {
318321
m.impl("neg", CppFunction::makeFallthrough());
319322
m.impl("neg.out", CppFunction::makeFallthrough());
320323
m.impl("neg_", CppFunction::makeFallthrough());
324+
m.impl("nextafter", CppFunction::makeFallthrough());
325+
m.impl("nextafter.out", CppFunction::makeFallthrough());
326+
m.impl("nextafter_", CppFunction::makeFallthrough());
321327
m.impl("normal_", CppFunction::makeFallthrough());
322328
m.impl("ones_like", CppFunction::makeFallthrough());
323329
m.impl("output_nr", CppFunction::makeFallthrough());

aten/src/ATen/core/aten_interned_strings.h

Lines changed: 7 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -24,7 +24,6 @@ _(aten, _acos) \
2424
_(aten, _addmv) \
2525
_(aten, _addr) \
2626
_(aten, _arange) \
27-
_(aten, _acosh) \
2827
_(aten, _asinh) \
2928
_(aten, _atanh) \
3029
_(aten, _argmax) \
@@ -247,6 +246,8 @@ _(aten, clamp_min) \
247246
_(aten, clone) \
248247
_(aten, coalesce) \
249248
_(aten, combinations) \
249+
_(aten, complex) \
250+
_(aten, polar) \
250251
_(aten, constant_pad_nd) \
251252
_(aten, contiguous) \
252253
_(aten, conv1d) \
@@ -302,6 +303,7 @@ _(aten, div) \
302303
_(aten, div_) \
303304
_(aten, dot) \
304305
_(aten, dropout) \
306+
_(aten, dstack) \
305307
_(aten, eig) \
306308
_(aten, einsum) \
307309
_(aten, elu) \
@@ -376,6 +378,8 @@ _(aten, hardtanh_forward) \
376378
_(aten, hinge_embedding_loss) \
377379
_(aten, histc) \
378380
_(aten, hspmm) \
381+
_(aten, hstack) \
382+
_(aten, hypot) \
379383
_(aten, ifft) \
380384
_(aten, index) \
381385
_(aten, index_add) \
@@ -521,6 +525,7 @@ _(aten, native_tensor) \
521525
_(aten, native_zero) \
522526
_(aten, ne) \
523527
_(aten, neg) \
528+
_(aten, nextafter) \
524529
_(aten, bitwise_and) \
525530
_(aten, bitwise_not) \
526531
_(aten, bitwise_or) \
@@ -741,6 +746,7 @@ _(aten, vander) \
741746
_(aten, var) \
742747
_(aten, view) \
743748
_(aten, view_as) \
749+
_(aten, vstack) \
744750
_(aten, where) \
745751
_(aten, zero) \
746752
_(aten, zeros) \

aten/src/ATen/core/boxing/impl/kernel_function_test.cpp

Lines changed: 0 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -674,16 +674,6 @@ TEST(OperatorRegistrationTest_FunctionBasedKernel, givenKernel_whenRegistered_th
674674
expectCallsConcatUnboxed(DispatchKey::CPU);
675675
}
676676

677-
TEST(OperatorRegistrationTest_FunctionBasedKernel, givenKernel_whenRegisteredUnboxedOnly_thenCanBeCalledUnboxed) {
678-
auto registrar = RegisterOperators().op("_test::my_op(Tensor dummy, str a, str b, int c) -> str", RegisterOperators::options().impl_unboxedOnlyKernel<decltype(concatKernel), &concatKernel>(DispatchKey::CPU));
679-
expectCallsConcatUnboxed(DispatchKey::CPU);
680-
}
681-
682-
TEST(OperatorRegistrationTest_FunctionBasedKernel, givenKernel_whenRegisteredUnboxedOnly_thenCannotBeCalledBoxed) {
683-
auto registrar = RegisterOperators().op("_test::my_op(Tensor dummy, str a, str b, int c) -> str", RegisterOperators::options().impl_unboxedOnlyKernel<decltype(concatKernel), &concatKernel>(DispatchKey::CPU));
684-
expectCannotCallConcatBoxed(DispatchKey::CPU);
685-
}
686-
687677
std::tuple<int64_t, Tensor> kernelForSchemaInference(Tensor arg1, int64_t arg2, const c10::List<Tensor>& arg3) {
688678
return {};
689679
}

0 commit comments

Comments (0)