Skip to content

Commit cc322e2

Browse files
committed
Update on "De-prioritise Dimname and DimnameList in python overload resolution"
`None` being a valid `Dimname` is awkward for optional `dim` arguments, as found on NumPy's reduction functions like `std` and `var`. In these cases `dim=None` should mean an all-reduction, but instead you get an error "Please look up dimensions by name". I've also had to fix `FunctionParameter::check` to actually check the first element of `INT_LIST` arguments and reject non-int types. Otherwise, the dim names end up calling the `int[]` overload and fail. [ghstack-poisoned]
2 parents fdc6530 + 310e1ca commit cc322e2

797 files changed

Lines changed: 27865 additions & 8424 deletions

File tree

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

.circleci/cimodel/data/binary_build_data.py

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -52,6 +52,14 @@ def get_processor_arch_name(gpu_version):
5252
"3.7",
5353
],
5454
)),
55+
macos_arm64=([None], OrderedDict(
56+
wheel=[
57+
"3.8",
58+
],
59+
conda=[
60+
"3.8",
61+
],
62+
)),
5563
# Skip CUDA-9.2 builds on Windows
5664
windows=(
5765
[v for v in dimensions.GPU_VERSIONS if v not in ['cuda92'] + dimensions.ROCM_VERSION_LABELS],

.circleci/cimodel/data/binary_build_definitions.py

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -164,7 +164,7 @@ def gen_build_env_list(smoke):
164164
c.find_prop("gpu"),
165165
c.find_prop("package_format"),
166166
[c.find_prop("pyver")],
167-
c.find_prop("smoke"),
167+
c.find_prop("smoke") and not (c.find_prop("os_name") == "macos_arm64"), # don't test arm64
168168
c.find_prop("libtorch_variant"),
169169
c.find_prop("gcc_config_variant"),
170170
c.find_prop("libtorch_config_variant"),
@@ -216,7 +216,9 @@ def get_jobs(toplevel_key, smoke):
216216
configs = gen_build_env_list(smoke)
217217
phase = "build" if toplevel_key == "binarybuilds" else "test"
218218
for build_config in configs:
219-
jobs_list.append(build_config.gen_workflow_job(phase, nightly=True))
219+
# don't test for macos_arm64 as it's cross compiled
220+
if phase != "test" or build_config.os != "macos_arm64":
221+
jobs_list.append(build_config.gen_workflow_job(phase, nightly=True))
220222

221223
return jobs_list
222224

.circleci/cimodel/data/dimensions.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,7 @@
33
CUDA_VERSIONS = [
44
"101",
55
"102",
6-
"110",
6+
"111",
77
]
88

99
ROCM_VERSIONS = [

.circleci/cimodel/data/pytorch_build_data.py

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -67,6 +67,16 @@
6767
]),
6868
]),
6969
]),
70+
("11.2", [
71+
("3.8", [
72+
X(True),
73+
("libtorch", [
74+
(True, [
75+
('build_only', [X(True)]),
76+
]),
77+
]),
78+
]),
79+
]),
7080
]),
7181
]),
7282
("bionic", [

.circleci/cimodel/data/simple/docker_definitions.py

Lines changed: 2 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -6,19 +6,15 @@
66

77
# TODO: make this generated from a matrix rather than just a static list
88
IMAGE_NAMES = [
9-
"pytorch-linux-bionic-cuda11.1-cudnn8-py3.6-gcc9",
10-
"pytorch-linux-bionic-cuda11.1-cudnn8-py3.8-gcc9",
11-
"pytorch-linux-bionic-cuda11.0-cudnn8-py3.6-gcc9",
12-
"pytorch-linux-bionic-cuda11.0-cudnn8-py3.8-gcc9",
139
"pytorch-linux-bionic-cuda10.2-cudnn7-py3.8-gcc9",
1410
"pytorch-linux-bionic-py3.6-clang9",
1511
"pytorch-linux-bionic-cuda10.2-cudnn7-py3.6-clang9",
1612
"pytorch-linux-bionic-py3.8-gcc9",
1713
"pytorch-linux-xenial-cuda10-cudnn7-py3-gcc7",
1814
"pytorch-linux-xenial-cuda10.1-cudnn7-py3-gcc7",
1915
"pytorch-linux-xenial-cuda10.2-cudnn7-py3-gcc7",
20-
"pytorch-linux-xenial-cuda11.0-cudnn8-py3-gcc7",
2116
"pytorch-linux-xenial-cuda11.1-cudnn8-py3-gcc7",
17+
"pytorch-linux-xenial-cuda11.2-cudnn8-py3-gcc7",
2218
"pytorch-linux-xenial-cuda9.2-cudnn7-py3-gcc5.4",
2319
"pytorch-linux-xenial-cuda9.2-cudnn7-py3-gcc7",
2420
"pytorch-linux-xenial-py3-clang5-android-ndk-r19c",
@@ -31,6 +27,7 @@
3127
"pytorch-linux-xenial-py3.6-gcc7",
3228
"pytorch-linux-bionic-rocm3.9-py3.6",
3329
"pytorch-linux-bionic-rocm3.10-py3.6",
30+
"pytorch-linux-bionic-rocm4.0.1-py3.6",
3431
]
3532

3633

.circleci/cimodel/data/simple/macos_definitions.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -21,7 +21,7 @@ def gen_tree(self):
2121
return [{full_job_name: props_dict}]
2222

2323

24-
WORKFLOW_DATA = [MacOsJob("10_13"), MacOsJob("10_13", True)]
24+
WORKFLOW_DATA = [MacOsJob("10_15"), MacOsJob("10_13"), MacOsJob("10_13", True)]
2525

2626

2727
def get_workflow_jobs():

.circleci/cimodel/data/simple/mobile_definitions.py

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -65,6 +65,12 @@ def gen_tree(self):
6565
["custom", "build", "dynamic"]
6666
),
6767

68+
MobileJob(
69+
DOCKER_IMAGE_NDK,
70+
[DOCKER_REQUIREMENT_NDK],
71+
["custom", "build", "static"]
72+
),
73+
6874
# Use LLVM-DEV toolchain in android-ndk-r19c docker image
6975
# Most of this CI is already covered by "mobile-custom-build-dynamic" job
7076
MobileJob(

.circleci/cimodel/data/windows_build_definitions.py

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -136,6 +136,10 @@ def TruePred(_):
136136
WindowsJob(None, _VC2019, CudaVersion(11, 1)),
137137
WindowsJob(1, _VC2019, CudaVersion(11, 1), master_only_pred=TruePred),
138138
WindowsJob(2, _VC2019, CudaVersion(11, 1), master_only_pred=TruePred),
139+
# VS2019 CUDA-11.2
140+
WindowsJob(None, _VC2019, CudaVersion(11, 2), master_only_pred=TruePred),
141+
WindowsJob(1, _VC2019, CudaVersion(11, 2), master_only_pred=TruePred),
142+
WindowsJob(2, _VC2019, CudaVersion(11, 2), master_only_pred=TruePred),
139143
# VS2019 CPU-only
140144
WindowsJob(None, _VC2019, None),
141145
WindowsJob(1, _VC2019, None, master_only_pred=TruePred),

0 commit comments

Comments
 (0)