Skip to content

Commit 42b868c

Browse files
committed
Use AT_ROCM_ENABLED() instead of USE_ROCM
1 parent 76c3c4d commit 42b868c

1 file changed

Lines changed: 2 additions & 2 deletions

File tree

aten/src/ATen/native/cuda/linalg/BatchLinearAlgebra.cpp

Lines changed: 2 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -3231,7 +3231,7 @@ void lstsq_kernel(const Tensor& a, Tensor& b, Tensor& /*rank*/, Tensor& /*singul
3231  3231
"Please rebuild with cuSOLVER.");
3232  3232
#endif
3233  3233
} else { // m >= n
3234  -
#if !defined(USE_ROCM)
3234  +
#if !AT_ROCM_ENABLED()
3235  3235
// On CUDA platform we use either cuBLAS or cuSOLVER here
3236  3236
// the batched vs looped dispatch is implemented based on the following performance results
3237  3237
// https://github.com/pytorch/pytorch/pull/54725#issuecomment-832234456
@@ -3244,7 +3244,7 @@ void lstsq_kernel(const Tensor& a, Tensor& b, Tensor& /*rank*/, Tensor& /*singul
3244  3244
// On ROCm platform we can only use MAGMA here
3245  3245
// If MAGMA is not available, an error will be thrown
3246  3246
gels_magma(a, b, infos);
3247  -
#endif // USE_ROCM()
3247  +
#endif // !AT_ROCM_ENABLED()
3248  3248
}
3249  3249
}
3250  3250

0 commit comments

Comments (0)