Skip to content

Commit fd018f3

Browse files
committed
Update
[ghstack-poisoned]
2 parents 73969ee + fd0a261 commit fd018f3

578 files changed

Lines changed: 15343 additions & 11394 deletions

File tree

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

.bazelversion

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1 +1 @@
1-
6.1.1
1+
6.5.0

.ci/docker/build.sh

Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -308,6 +308,17 @@ case "$image" in
308308
CONDA_CMAKE=yes
309309
TRITON=yes
310310
;;
311+
pytorch-linux-jammy-xpu-2025.0-py3)
312+
ANACONDA_PYTHON_VERSION=3.9
313+
GCC_VERSION=11
314+
PROTOBUF=yes
315+
DB=yes
316+
VISION=yes
317+
XPU_VERSION=2025.0
318+
NINJA_VERSION=1.9.0
319+
CONDA_CMAKE=yes
320+
TRITON=yes
321+
;;
311322
pytorch-linux-jammy-py3.9-gcc11-inductor-benchmarks)
312323
ANACONDA_PYTHON_VERSION=3.9
313324
GCC_VERSION=11

.ci/docker/common/install_cuda.sh

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,7 @@
33
set -ex
44

55
NCCL_VERSION=v2.21.5-1
6-
CUDNN_VERSION=9.1.0.70
6+
CUDNN_VERSION=9.5.1.17
77

88
function install_cusparselt_040 {
99
# cuSparseLt license: https://docs.nvidia.com/cuda/cusparselt/license.html
@@ -39,6 +39,7 @@ function install_cusparselt_062 {
3939
}
4040

4141
function install_118 {
42+
CUDNN_VERSION=9.1.0.70
4243
echo "Installing CUDA 11.8 and cuDNN ${CUDNN_VERSION} and NCCL ${NCCL_VERSION} and cuSparseLt-0.4.0"
4344
rm -rf /usr/local/cuda-11.8 /usr/local/cuda
4445
# install CUDA 11.8.0 in the same container
@@ -105,6 +106,7 @@ function install_121 {
105106
}
106107

107108
function install_124 {
109+
CUDNN_VERSION=9.1.0.70
108110
echo "Installing CUDA 12.4.1 and cuDNN ${CUDNN_VERSION} and NCCL ${NCCL_VERSION} and cuSparseLt-0.6.2"
109111
rm -rf /usr/local/cuda-12.4 /usr/local/cuda
110112
# install CUDA 12.4.1 in the same container

.ci/docker/common/install_cuda_aarch64.sh

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -4,7 +4,7 @@
44
set -ex
55

66
NCCL_VERSION=v2.21.5-1
7-
CUDNN_VERSION=9.1.0.70
7+
CUDNN_VERSION=9.5.1.17
88

99
function install_cusparselt_062 {
1010
# cuSparseLt license: https://docs.nvidia.com/cuda/cusparselt/license.html
@@ -18,6 +18,7 @@ function install_cusparselt_062 {
1818
}
1919

2020
function install_124 {
21+
CUDNN_VERSION=9.1.0.70
2122
echo "Installing CUDA 12.4.1 and cuDNN ${CUDNN_VERSION} and NCCL ${NCCL_VERSION} and cuSparseLt-0.6.2"
2223
rm -rf /usr/local/cuda-12.4 /usr/local/cuda
2324
# install CUDA 12.4.1 in the same container

.ci/docker/common/install_cudnn.sh

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -4,7 +4,9 @@ if [[ -n "${CUDNN_VERSION}" ]]; then
44
# cuDNN license: https://developer.nvidia.com/cudnn/license_agreement
55
mkdir tmp_cudnn
66
pushd tmp_cudnn
7-
if [[ ${CUDA_VERSION:0:2} == "12" ]]; then
7+
if [[ ${CUDA_VERSION:0:4} == "12.6" ]]; then
8+
CUDNN_NAME="cudnn-linux-x86_64-9.5.1.17_cuda12-archive"
9+
elif [[ ${CUDA_VERSION:0:2} == "12" ]]; then
810
CUDNN_NAME="cudnn-linux-x86_64-9.1.0.70_cuda12-archive"
911
elif [[ ${CUDA_VERSION:0:2} == "11" ]]; then
1012
CUDNN_NAME="cudnn-linux-x86_64-9.1.0.70_cuda11-archive"

.ci/docker/common/install_xpu.sh

Lines changed: 21 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -24,10 +24,10 @@ function install_ubuntu() {
2424
| tee /etc/apt/sources.list.d/intel-gpu-${VERSION_CODENAME}.list
2525
# To add the online network package repository for the Intel Support Packages
2626
wget -O- https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB \
27-
| gpg --dearmor > /usr/share/keyrings/intel-for-pytorch-gpu-dev-keyring.gpg
28-
echo "deb [signed-by=/usr/share/keyrings/intel-for-pytorch-gpu-dev-keyring.gpg] \
29-
https://apt.repos.intel.com/intel-for-pytorch-gpu-dev all main" \
30-
| tee /etc/apt/sources.list.d/intel-for-pytorch-gpu-dev.list
27+
| gpg --dearmor > /usr/share/keyrings/oneapi-archive-keyring.gpg.gpg
28+
echo "deb [signed-by=/usr/share/keyrings/oneapi-archive-keyring.gpg.gpg] \
29+
https://apt.repos.intel.com/${XPU_REPO_NAME} all main" \
30+
| tee /etc/apt/sources.list.d/oneAPI.list
3131

3232
# Update the packages list and repository index
3333
apt-get update
@@ -47,11 +47,7 @@ function install_ubuntu() {
4747
# Development Packages
4848
apt-get install -y libigc-dev intel-igc-cm libigdfcl-dev libigfxcmrt-dev level-zero-dev
4949
# Install Intel Support Packages
50-
if [ -n "$XPU_VERSION" ]; then
51-
apt-get install -y intel-for-pytorch-gpu-dev-${XPU_VERSION} intel-pti-dev-0.9
52-
else
53-
apt-get install -y intel-for-pytorch-gpu-dev-0.5 intel-pti-dev-0.9
54-
fi
50+
apt-get install -y ${XPU_PACKAGES}
5551

5652
# Cleanup
5753
apt-get autoclean && apt-get clean
@@ -61,30 +57,32 @@ function install_ubuntu() {
6157
function install_rhel() {
6258
. /etc/os-release
6359
if [[ "${ID}" == "rhel" ]]; then
64-
if [[ ! " 8.6 8.8 8.9 9.0 9.2 9.3 " =~ " ${VERSION_ID} " ]]; then
60+
if [[ ! " 8.8 8.9 9.0 9.2 9.3 " =~ " ${VERSION_ID} " ]]; then
6561
echo "RHEL version ${VERSION_ID} not supported"
6662
exit
6763
fi
6864
elif [[ "${ID}" == "almalinux" ]]; then
6965
# Workaround for almalinux8 which used by quay.io/pypa/manylinux_2_28_x86_64
70-
VERSION_ID="8.6"
66+
VERSION_ID="8.8"
7167
fi
7268

7369
dnf install -y 'dnf-command(config-manager)'
7470
# To add the online network package repository for the GPU Driver
7571
dnf config-manager --add-repo \
7672
https://repositories.intel.com/gpu/rhel/${VERSION_ID}${XPU_DRIVER_VERSION}/unified/intel-gpu-${VERSION_ID}.repo
7773
# To add the online network package repository for the Intel Support Packages
78-
tee > /etc/yum.repos.d/intel-for-pytorch-gpu-dev.repo << EOF
79-
[intel-for-pytorch-gpu-dev]
74+
tee > /etc/yum.repos.d/oneAPI.repo << EOF
75+
[oneAPI]
8076
name=Intel for Pytorch GPU dev repository
81-
baseurl=https://yum.repos.intel.com/intel-for-pytorch-gpu-dev
77+
baseurl=https://yum.repos.intel.com/${XPU_REPO_NAME}
8278
enabled=1
8379
gpgcheck=1
8480
repo_gpgcheck=1
8581
gpgkey=https://yum.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB
8682
EOF
8783

84+
# Install Intel Support Packages
85+
yum install -y ${XPU_PACKAGES}
8886
# The xpu-smi packages
8987
dnf install -y xpu-smi
9088
# Compute and Media Runtimes
@@ -99,8 +97,6 @@ EOF
9997
dnf install -y --refresh \
10098
intel-igc-opencl-devel level-zero-devel intel-gsc-devel libmetee-devel \
10199
level-zero-devel
102-
# Install Intel Support Packages
103-
yum install -y intel-for-pytorch-gpu-dev-0.5 intel-pti-dev-0.9
104100

105101
# Cleanup
106102
dnf clean all
@@ -122,7 +118,7 @@ function install_sles() {
122118
https://repositories.intel.com/gpu/sles/${VERSION_SP}${XPU_DRIVER_VERSION}/unified/intel-gpu-${VERSION_SP}.repo
123119
rpm --import https://repositories.intel.com/gpu/intel-graphics.key
124120
# To add the online network package repository for the Intel Support Packages
125-
zypper addrepo https://yum.repos.intel.com/intel-for-pytorch-gpu-dev intel-for-pytorch-gpu-dev
121+
zypper addrepo https://yum.repos.intel.com/${XPU_REPO_NAME} oneAPI
126122
rpm --import https://yum.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB
127123

128124
# The xpu-smi packages
@@ -134,7 +130,7 @@ function install_sles() {
134130
zypper install -y libigdfcl-devel intel-igc-cm libigfxcmrt-devel level-zero-devel
135131

136132
# Install Intel Support Packages
137-
zypper install -y intel-for-pytorch-gpu-dev-0.5 intel-pti-dev-0.9
133+
zypper install -y ${XPU_PACKAGES}
138134

139135
}
140136

@@ -145,6 +141,13 @@ if [[ "${XPU_DRIVER_TYPE,,}" == "rolling" ]]; then
145141
XPU_DRIVER_VERSION=""
146142
fi
147143

144+
XPU_REPO_NAME="intel-for-pytorch-gpu-dev"
145+
XPU_PACKAGES="intel-for-pytorch-gpu-dev-0.5 intel-pti-dev-0.9"
146+
if [[ "$XPU_VERSION" == "2025.0" ]]; then
147+
XPU_REPO_NAME="oneapi"
148+
XPU_PACKAGES="intel-deep-learning-essentials-2025.0"
149+
fi
150+
148151
# The installation depends on the base OS
149152
ID=$(grep -oP '(?<=^ID=).+' /etc/os-release | tr -d '"')
150153
case "$ID" in

.ci/docker/manywheel/Dockerfile_2_28

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -163,5 +163,6 @@ ENV XPU_DRIVER_TYPE ROLLING
163163
RUN python3 -m pip install --upgrade pip && \
164164
python3 -mpip install cmake==3.28.4
165165
ADD ./common/install_xpu.sh install_xpu.sh
166+
ENV XPU_VERSION 2025.0
166167
RUN bash ./install_xpu.sh && rm install_xpu.sh
167168
RUN pushd /opt/_internal && tar -xJf static-libs-for-embedding-only.tar.xz && popd

.ci/manywheel/build_cpu.sh

Lines changed: 2 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -20,8 +20,8 @@ fi
2020
DIR_SUFFIX=cpu
2121
if [[ "$GPU_ARCH_TYPE" == "xpu" ]]; then
2222
DIR_SUFFIX=xpu
23-
# Refer https://www.intel.com/content/www/us/en/developer/articles/tool/pytorch-prerequisites-for-intel-gpu/2-5.html
24-
source /opt/intel/oneapi/pytorch-gpu-dev-0.5/oneapi-vars.sh
23+
# Refer https://www.intel.com/content/www/us/en/developer/articles/tool/pytorch-prerequisites-for-intel-gpus.html
24+
source /opt/intel/oneapi/compiler/latest/env/vars.sh
2525
source /opt/intel/oneapi/pti/latest/env/vars.sh
2626
export USE_STATIC_MKL=1
2727
fi
@@ -63,28 +63,18 @@ DEPS_SONAME=(
6363
if [[ "$GPU_ARCH_TYPE" == "xpu" ]]; then
6464
echo "Bundling with xpu support package libs."
6565
DEPS_LIST+=(
66-
"/opt/intel/oneapi/compiler/latest/lib/libsycl-preview.so.7"
6766
"/opt/intel/oneapi/compiler/latest/lib/libOpenCL.so.1"
68-
"/opt/intel/oneapi/compiler/latest/lib/libxptifw.so"
6967
"/opt/intel/oneapi/compiler/latest/lib/libsvml.so"
7068
"/opt/intel/oneapi/compiler/latest/lib/libirng.so"
7169
"/opt/intel/oneapi/compiler/latest/lib/libimf.so"
7270
"/opt/intel/oneapi/compiler/latest/lib/libintlc.so.5"
73-
"/opt/intel/oneapi/compiler/latest/lib/libpi_level_zero.so"
74-
"/opt/intel/oneapi/pti/latest/lib/libpti_view.so.0.9"
75-
"/opt/intel/oneapi/pti/latest/lib/libpti.so.0.9"
7671
)
7772
DEPS_SONAME+=(
78-
"libsycl-preview.so.7"
7973
"libOpenCL.so.1"
80-
"libxptifw.so"
8174
"libsvml.so"
8275
"libirng.so"
8376
"libimf.so"
8477
"libintlc.so.5"
85-
"libpi_level_zero.so"
86-
"libpti_view.so.0.9"
87-
"libpti.so.0.9"
8878
)
8979
fi
9080

.ci/pytorch/common_utils.sh

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -105,9 +105,9 @@ function get_bazel() {
105105
# version of Bazelisk to fetch the platform specific version of
106106
# Bazel to use from .bazelversion.
107107
retry curl --location --output tools/bazel \
108-
https://raw.githubusercontent.com/bazelbuild/bazelisk/v1.16.0/bazelisk.py
108+
https://raw.githubusercontent.com/bazelbuild/bazelisk/v1.23.0/bazelisk.py
109109
shasum --algorithm=1 --check \
110-
<(echo 'd4369c3d293814d3188019c9f7527a948972d9f8 tools/bazel')
110+
<(echo '01df9cf7f08dd80d83979ed0d0666a99349ae93c tools/bazel')
111111
chmod u+x tools/bazel
112112
}
113113

.ci/pytorch/test.sh

Lines changed: 10 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -169,9 +169,13 @@ fi
169169

170170
if [[ "$BUILD_ENVIRONMENT" == *xpu* ]]; then
171171
# Source Intel oneAPI environment script to enable xpu runtime related libraries
172-
# refer to https://www.intel.com/content/www/us/en/developer/articles/tool/pytorch-prerequisites-for-intel-gpu/2-5.html
172+
# refer to https://www.intel.com/content/www/us/en/developer/articles/tool/pytorch-prerequisites-for-intel-gpus.html
173173
# shellcheck disable=SC1091
174174
source /opt/intel/oneapi/compiler/latest/env/vars.sh
175+
if [ -f /opt/intel/oneapi/umf/latest/env/vars.sh ]; then
176+
# shellcheck disable=SC1091
177+
source /opt/intel/oneapi/umf/latest/env/vars.sh
178+
fi
175179
# Check XPU status before testing
176180
xpu-smi discovery
177181
fi
@@ -1406,7 +1410,11 @@ if ! [[ "${BUILD_ENVIRONMENT}" == *libtorch* || "${BUILD_ENVIRONMENT}" == *-baze
14061410
(cd test && python -c "import torch; print(torch.__config__.show())")
14071411
(cd test && python -c "import torch; print(torch.__config__.parallel_info())")
14081412
fi
1409-
if [[ "${BUILD_ENVIRONMENT}" == *aarch64* && "${TEST_CONFIG}" != *perf_cpu_aarch64* ]]; then
1413+
if [[ "${TEST_CONFIG}" == *numpy_2* ]]; then
1414+
# Install numpy-2.0.2 and test inductor tracing
1415+
python -mpip install --pre numpy==2.0.2
1416+
python test/run_test.py --include dynamo/test_unspec.py
1417+
elif [[ "${BUILD_ENVIRONMENT}" == *aarch64* && "${TEST_CONFIG}" != *perf_cpu_aarch64* ]]; then
14101418
test_linux_aarch64
14111419
elif [[ "${TEST_CONFIG}" == *backward* ]]; then
14121420
test_forward_backward_compatibility

0 commit comments

Comments
 (0)