Fails to compile with GCC 12.1.0 #77939
🐛 Describe the bug
I followed the instructions to build PyTorch from source in a conda environment on Arch Linux. The compilation fails with the following error:
/usr/lib/gcc/x86_64-pc-linux-gnu/12.1.0/include/avx512fintrin.h: In function ‘void fbgemm::requantizeOutputProcessingGConvAvx512(uint8_t*, const int32_t*, const block_type_t&, int, int, const requantizationParams_t<BIAS_TYPE>&) [with bool A_SYMMETRIC = false; bool B_SYMMETRIC = false; QuantizationGranularity Q_GRAN = fbgemm::QuantizationGranularity::OUT_CHANNEL; bool HAS_BIAS = false; bool FUSE_RELU = false; int C_PER_G = 8; BIAS_TYPE = int]’:
/usr/lib/gcc/x86_64-pc-linux-gnu/12.1.0/include/avx512fintrin.h:188:10: note: ‘__Y’ was declared here
188 | __m512 __Y = __Y;
| ^~~
In function ‘__m512i _mm512_cvtps_epi32(__m512)’,
inlined from ‘void fbgemm::requantizeOutputProcessingGConvAvx512(uint8_t*, const int32_t*, const block_type_t&, int, int, const requantizationParams_t<BIAS_TYPE>&) [with bool A_SYMMETRIC = false; bool B_SYMMETRIC = false; QuantizationGranularity Q_GRAN = fbgemm::QuantizationGranularity::OUT_CHANNEL; bool HAS_BIAS = false; bool FUSE_RELU = false; int C_PER_G = 8; BIAS_TYPE = int]’ at /home/elf/brego/src/pytorch/third_party/fbgemm/src/QuantUtilsAvx512.cc:331:47:
/usr/lib/gcc/x86_64-pc-linux-gnu/12.1.0/include/avx512fintrin.h:14044:52: error: ‘__Y’ may be used uninitialized [-Werror=maybe-uninitialized]
14044 | return (__m512i) __builtin_ia32_cvtps2dq512_mask ((__v16sf) __A,
| ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~^~~~~~~~~~~~~~~
14045 | (__v16si)
| ~~~~~~~~~
14046 | _mm512_undefined_epi32 (),
| ~~~~~~~~~~~~~~~~~~~~~~~~~~
14047 | (__mmask16) -1,
| ~~~~~~~~~~~~~~~
14048 | _MM_FROUND_CUR_DIRECTION);
| ~~~~~~~~~~~~~~~~~~~~~~~~~
/usr/lib/gcc/x86_64-pc-linux-gnu/12.1.0/include/avx512fintrin.h: In function ‘void fbgemm::requantizeOutputProcessingGConvAvx512(uint8_t*, const int32_t*, const block_type_t&, int, int, const requantizationParams_t<BIAS_TYPE>&) [with bool A_SYMMETRIC = false; bool B_SYMMETRIC = false; QuantizationGranularity Q_GRAN = fbgemm::QuantizationGranularity::OUT_CHANNEL; bool HAS_BIAS = false; bool FUSE_RELU = false; int C_PER_G = 8; BIAS_TYPE = int]’:
/usr/lib/gcc/x86_64-pc-linux-gnu/12.1.0/include/avx512fintrin.h:206:11: note: ‘__Y’ was declared here
206 | __m512i __Y = __Y;
| ^~~
In function ‘__m512i _mm512_permutexvar_epi32(__m512i, __m512i)’,
inlined from ‘void fbgemm::requantizeOutputProcessingGConvAvx512(uint8_t*, const int32_t*, const block_type_t&, int, int, const requantizationParams_t<BIAS_TYPE>&) [with bool A_SYMMETRIC = false; bool B_SYMMETRIC = false; QuantizationGranularity Q_GRAN = fbgemm::QuantizationGranularity::OUT_CHANNEL; bool HAS_BIAS = false; bool FUSE_RELU = false; int C_PER_G = 8; BIAS_TYPE = int]’ at /home/elf/brego/src/pytorch/third_party/fbgemm/src/QuantUtilsAvx512.cc:353:45:
/usr/lib/gcc/x86_64-pc-linux-gnu/12.1.0/include/avx512fintrin.h:7027:53: error: ‘__Y’ may be used uninitialized [-Werror=maybe-uninitialized]
7027 | return (__m512i) __builtin_ia32_permvarsi512_mask ((__v16si) __Y,
| ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~^~~~~~~~~~~~~~~
7028 | (__v16si) __X,
| ~~~~~~~~~~~~~~
7029 | (__v16si)
| ~~~~~~~~~
7030 | _mm512_undefined_epi32 (),
| ~~~~~~~~~~~~~~~~~~~~~~~~~~
7031 | (__mmask16) -1);
| ~~~~~~~~~~~~~~~
/usr/lib/gcc/x86_64-pc-linux-gnu/12.1.0/include/avx512fintrin.h: In function ‘void fbgemm::requantizeOutputProcessingGConvAvx512(uint8_t*, const int32_t*, const block_type_t&, int, int, const requantizationParams_t<BIAS_TYPE>&) [with bool A_SYMMETRIC = false; bool B_SYMMETRIC = false; QuantizationGranularity Q_GRAN = fbgemm::QuantizationGranularity::OUT_CHANNEL; bool HAS_BIAS = false; bool FUSE_RELU = false; int C_PER_G = 8; BIAS_TYPE = int]’:
/usr/lib/gcc/x86_64-pc-linux-gnu/12.1.0/include/avx512fintrin.h:206:11: note: ‘__Y’ was declared here
206 | __m512i __Y = __Y;
| ^~~
In function ‘__m128i _mm512_extracti32x4_epi32(__m512i, int)’,
inlined from ‘__m128i _mm512_castsi512_si128(__m512i)’ at /usr/lib/gcc/x86_64-pc-linux-gnu/12.1.0/include/avx512fintrin.h:15829:10,
inlined from ‘void fbgemm::requantizeOutputProcessingGConvAvx512(uint8_t*, const int32_t*, const block_type_t&, int, int, const requantizationParams_t<BIAS_TYPE>&) [with bool A_SYMMETRIC = false; bool B_SYMMETRIC = false; QuantizationGranularity Q_GRAN = fbgemm::QuantizationGranularity::OUT_CHANNEL; bool HAS_BIAS = false; bool FUSE_RELU = false; int C_PER_G = 8; BIAS_TYPE = int]’ at /home/elf/brego/src/pytorch/third_party/fbgemm/src/QuantUtilsAvx512.cc:373:25:
/usr/lib/gcc/x86_64-pc-linux-gnu/12.1.0/include/avx512fintrin.h:6045:53: error: ‘__Y’ may be used uninitialized [-Werror=maybe-uninitialized]
6045 | return (__m128i) __builtin_ia32_extracti32x4_mask ((__v16si) __A,
| ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~^~~~~~~~~~~~~~~
6046 | __imm,
| ~~~~~~
6047 | (__v4si)
| ~~~~~~~~
6048 | _mm_undefined_si128 (),
| ~~~~~~~~~~~~~~~~~~~~~~~
6049 | (__mmask8) -1);
| ~~~~~~~~~~~~~~
/usr/lib/gcc/x86_64-pc-linux-gnu/12.1.0/include/emmintrin.h: In function ‘void fbgemm::requantizeOutputProcessingGConvAvx512(uint8_t*, const int32_t*, const block_type_t&, int, int, const requantizationParams_t<BIAS_TYPE>&) [with bool A_SYMMETRIC = false; bool B_SYMMETRIC = false; QuantizationGranularity Q_GRAN = fbgemm::QuantizationGranularity::OUT_CHANNEL; bool HAS_BIAS = false; bool FUSE_RELU = false; int C_PER_G = 8; BIAS_TYPE = int]’:
/usr/lib/gcc/x86_64-pc-linux-gnu/12.1.0/include/emmintrin.h:788:11: note: ‘__Y’ was declared here
788 | __m128i __Y = __Y;
| ^~~
In function ‘__m512 _mm512_cvtepi32_ps(__m512i)’,
inlined from ‘void fbgemm::requantizeOutputProcessingGConvAvx512(uint8_t*, const int32_t*, const block_type_t&, int, int, const requantizationParams_t<BIAS_TYPE>&) [with bool A_SYMMETRIC = false; bool B_SYMMETRIC = false; QuantizationGranularity Q_GRAN = fbgemm::QuantizationGranularity::OUT_CHANNEL; bool HAS_BIAS = false; bool FUSE_RELU = false; int C_PER_G = 16; BIAS_TYPE = int]’ at /home/elf/brego/src/pytorch/third_party/fbgemm/src/QuantUtilsAvx512.cc:268:34:
/usr/lib/gcc/x86_64-pc-linux-gnu/12.1.0/include/avx512fintrin.h:14148:10: error: ‘__Y’ may be used uninitialized [-Werror=maybe-uninitialized]
14148 | return (__m512) __builtin_ia32_cvtdq2ps512_mask ((__v16si) __A,
| ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
14149 | (__v16sf)
| ~~~~~~~~~
14150 | _mm512_undefined_ps (),
| ~~~~~~~~~~~~~~~~~~~~~~~
14151 | (__mmask16) -1,
| ~~~~~~~~~~~~~~~
14152 | _MM_FROUND_CUR_DIRECTION);
| ~~~~~~~~~~~~~~~~~~~~~~~~~
/usr/lib/gcc/x86_64-pc-linux-gnu/12.1.0/include/avx512fintrin.h: In function ‘void fbgemm::requantizeOutputProcessingGConvAvx512(uint8_t*, const int32_t*, const block_type_t&, int, int, const requantizationParams_t<BIAS_TYPE>&) [with bool A_SYMMETRIC = false; bool B_SYMMETRIC = false; QuantizationGranularity Q_GRAN = fbgemm::QuantizationGranularity::OUT_CHANNEL; bool HAS_BIAS = false; bool FUSE_RELU = false; int C_PER_G = 16; BIAS_TYPE = int]’:
/usr/lib/gcc/x86_64-pc-linux-gnu/12.1.0/include/avx512fintrin.h:188:10: note: ‘__Y’ was declared here
188 | __m512 __Y = __Y;
| ^~~
In function ‘__m512i _mm512_cvtps_epi32(__m512)’,
inlined from ‘void fbgemm::requantizeOutputProcessingGConvAvx512(uint8_t*, const int32_t*, const block_type_t&, int, int, const requantizationParams_t<BIAS_TYPE>&) [with bool A_SYMMETRIC = false; bool B_SYMMETRIC = false; QuantizationGranularity Q_GRAN = fbgemm::QuantizationGranularity::OUT_CHANNEL; bool HAS_BIAS = false; bool FUSE_RELU = false; int C_PER_G = 16; BIAS_TYPE = int]’ at /home/elf/brego/src/pytorch/third_party/fbgemm/src/QuantUtilsAvx512.cc:331:47:
/usr/lib/gcc/x86_64-pc-linux-gnu/12.1.0/include/avx512fintrin.h:14044:52: error: ‘__Y’ may be used uninitialized [-Werror=maybe-uninitialized]
14044 | return (__m512i) __builtin_ia32_cvtps2dq512_mask ((__v16sf) __A,
| ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~^~~~~~~~~~~~~~~
14045 | (__v16si)
| ~~~~~~~~~
14046 | _mm512_undefined_epi32 (),
| ~~~~~~~~~~~~~~~~~~~~~~~~~~
14047 | (__mmask16) -1,
| ~~~~~~~~~~~~~~~
14048 | _MM_FROUND_CUR_DIRECTION);
| ~~~~~~~~~~~~~~~~~~~~~~~~~
/usr/lib/gcc/x86_64-pc-linux-gnu/12.1.0/include/avx512fintrin.h: In function ‘void fbgemm::requantizeOutputProcessingGConvAvx512(uint8_t*, const int32_t*, const block_type_t&, int, int, const requantizationParams_t<BIAS_TYPE>&) [with bool A_SYMMETRIC = false; bool B_SYMMETRIC = false; QuantizationGranularity Q_GRAN = fbgemm::QuantizationGranularity::OUT_CHANNEL; bool HAS_BIAS = false; bool FUSE_RELU = false; int C_PER_G = 16; BIAS_TYPE = int]’:
/usr/lib/gcc/x86_64-pc-linux-gnu/12.1.0/include/avx512fintrin.h:206:11: note: ‘__Y’ was declared here
206 | __m512i __Y = __Y;
| ^~~
In function ‘__m512i _mm512_permutexvar_epi32(__m512i, __m512i)’,
inlined from ‘void fbgemm::requantizeOutputProcessingGConvAvx512(uint8_t*, const int32_t*, const block_type_t&, int, int, const requantizationParams_t<BIAS_TYPE>&) [with bool A_SYMMETRIC = false; bool B_SYMMETRIC = false; QuantizationGranularity Q_GRAN = fbgemm::QuantizationGranularity::OUT_CHANNEL; bool HAS_BIAS = false; bool FUSE_RELU = false; int C_PER_G = 16; BIAS_TYPE = int]’ at /home/elf/brego/src/pytorch/third_party/fbgemm/src/QuantUtilsAvx512.cc:353:45:
/usr/lib/gcc/x86_64-pc-linux-gnu/12.1.0/include/avx512fintrin.h:7027:53: error: ‘__Y’ may be used uninitialized [-Werror=maybe-uninitialized]
7027 | return (__m512i) __builtin_ia32_permvarsi512_mask ((__v16si) __Y,
| ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~^~~~~~~~~~~~~~~
7028 | (__v16si) __X,
| ~~~~~~~~~~~~~~
7029 | (__v16si)
| ~~~~~~~~~
7030 | _mm512_undefined_epi32 (),
| ~~~~~~~~~~~~~~~~~~~~~~~~~~
7031 | (__mmask16) -1);
| ~~~~~~~~~~~~~~~
/usr/lib/gcc/x86_64-pc-linux-gnu/12.1.0/include/avx512fintrin.h: In function ‘void fbgemm::requantizeOutputProcessingGConvAvx512(uint8_t*, const int32_t*, const block_type_t&, int, int, const requantizationParams_t<BIAS_TYPE>&) [with bool A_SYMMETRIC = false; bool B_SYMMETRIC = false; QuantizationGranularity Q_GRAN = fbgemm::QuantizationGranularity::OUT_CHANNEL; bool HAS_BIAS = false; bool FUSE_RELU = false; int C_PER_G = 16; BIAS_TYPE = int]’:
/usr/lib/gcc/x86_64-pc-linux-gnu/12.1.0/include/avx512fintrin.h:206:11: note: ‘__Y’ was declared here
206 | __m512i __Y = __Y;
| ^~~
In function ‘__m128i _mm512_extracti32x4_epi32(__m512i, int)’,
inlined from ‘__m128i _mm512_castsi512_si128(__m512i)’ at /usr/lib/gcc/x86_64-pc-linux-gnu/12.1.0/include/avx512fintrin.h:15829:10,
inlined from ‘void fbgemm::requantizeOutputProcessingGConvAvx512(uint8_t*, const int32_t*, const block_type_t&, int, int, const requantizationParams_t<BIAS_TYPE>&) [with bool A_SYMMETRIC = false; bool B_SYMMETRIC = false; QuantizationGranularity Q_GRAN = fbgemm::QuantizationGranularity::OUT_CHANNEL; bool HAS_BIAS = false; bool FUSE_RELU = false; int C_PER_G = 16; BIAS_TYPE = int]’ at /home/elf/brego/src/pytorch/third_party/fbgemm/src/QuantUtilsAvx512.cc:369:25:
/usr/lib/gcc/x86_64-pc-linux-gnu/12.1.0/include/avx512fintrin.h:6045:53: error: ‘__Y’ may be used uninitialized [-Werror=maybe-uninitialized]
6045 | return (__m128i) __builtin_ia32_extracti32x4_mask ((__v16si) __A,
| ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~^~~~~~~~~~~~~~~
6046 | __imm,
| ~~~~~~
6047 | (__v4si)
| ~~~~~~~~
6048 | _mm_undefined_si128 (),
| ~~~~~~~~~~~~~~~~~~~~~~~
6049 | (__mmask8) -1);
| ~~~~~~~~~~~~~~
/usr/lib/gcc/x86_64-pc-linux-gnu/12.1.0/include/emmintrin.h: In function ‘void fbgemm::requantizeOutputProcessingGConvAvx512(uint8_t*, const int32_t*, const block_type_t&, int, int, const requantizationParams_t<BIAS_TYPE>&) [with bool A_SYMMETRIC = false; bool B_SYMMETRIC = false; QuantizationGranularity Q_GRAN = fbgemm::QuantizationGranularity::OUT_CHANNEL; bool HAS_BIAS = false; bool FUSE_RELU = false; int C_PER_G = 16; BIAS_TYPE = int]’:
/usr/lib/gcc/x86_64-pc-linux-gnu/12.1.0/include/emmintrin.h:788:11: note: ‘__Y’ was declared here
788 | __m128i __Y = __Y;
| ^~~
cc1plus: all warnings being treated as errors
ninja: build stopped: subcommand failed.
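For context, every diagnostic above points into GCC 12's own intrinsic headers, where helpers such as `_mm512_undefined_ps()` and `_mm512_undefined_epi32()` deliberately produce an uninitialized vector via the self-initialization idiom (`__m512 __Y = __Y;`). GCC 12.1's stricter `-Wmaybe-uninitialized` analysis flags that idiom once the intrinsics are inlined into fbgemm, and because the build passes `-Werror`, the warning becomes a hard error. The sketch below is a hypothetical minimal reproduction of the pattern, not the actual fbgemm code, and whether it actually warns can depend on the optimization level and GCC's flow analysis:

```cpp
// repro.cpp -- hypothetical minimal sketch of the diagnostic above.
// Assumed build flags: g++ -O2 -mavx512f -Werror=maybe-uninitialized -c repro.cpp
#include <immintrin.h>

__m512i round_ps_to_epi32(__m512 x) {
  // _mm512_cvtps_epi32 is defined in avx512fintrin.h in terms of
  // _mm512_undefined_epi32(), whose body is the self-initialization
  // "__m512i __Y = __Y;". Under GCC 12.1 this may be reported as
  //   error: '__Y' may be used uninitialized [-Werror=maybe-uninitialized]
  // even though the value is only a don't-care pass-through operand
  // of the underlying masked builtin.
  return _mm512_cvtps_epi32(x);
}
```

If that reading is right, one local workaround is to stop treating this particular warning as an error, e.g. by appending `-Wno-error=maybe-uninitialized` (or `-Wno-maybe-uninitialized`) to the C++ flags when configuring the build; that suppresses the false positive rather than fixing anything in fbgemm or the GCC headers.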
Versions
Collecting environment information...
PyTorch version: N/A
Is debug build: N/A
CUDA used to build PyTorch: N/A
ROCM used to build PyTorch: N/A
OS: Arch Linux (x86_64)
GCC version: (GCC) 12.1.0
Clang version: 13.0.1
CMake version: version 3.22.1
Libc version: glibc-2.35
Python version: 3.9.12 (main, Apr 5 2022, 06:56:58) [GCC 7.5.0] (64-bit runtime)
Python platform: Linux-5.17.7-arch1-2-x86_64-with-glibc2.35
Is CUDA available: N/A
CUDA runtime version: 11.7.64
GPU models and configuration: GPU 0: NVIDIA GeForce RTX 3080 Ti
Nvidia driver version: 515.43.04
cuDNN version: Could not collect
HIP runtime version: N/A
MIOpen runtime version: N/A
Is XNNPACK available: N/A
Versions of relevant libraries:
[pip3] numpy==1.22.3
[conda] cudatoolkit 11.3.1 h2bc3f7f_2 anaconda
[conda] magma-cuda110 2.5.2 1 pytorch
[conda] mkl 2022.0.1 h06a4308_117
[conda] mkl-include 2022.0.1 h06a4308_117
[conda] numpy 1.22.3 py39h7a5d4dd_0
[conda] numpy-base 1.22.3 py39hb8be1f0_0