[ROCm] add skipCUDAIfVersionLessThan to unskip test_jiterator for ROCm (#99197)

This unskips 121 tests that the decorator `@skipCUDAIf(_get_torch_cuda_version() < (11, 6))` was unintentionally skipping on ROCm. Decorators such as `skipCUDAVersionIn` activate only for a genuine CUDA device, not for the CPU or for ROCm running as the CUDA device (where the reported CUDA version is 0, so the version comparison spuriously triggered the skip).

Pull Request resolved: https://github.com/pytorch/pytorch/pull/99197
Approved by: https://github.com/ngimel
This commit is contained in:
Jeff Daily
2023-04-17 16:05:16 +00:00
committed by PyTorch MergeBot
parent e549ad0046
commit 0711bff9aa
2 changed files with 21 additions and 6 deletions

View File

@@ -8,8 +8,7 @@ from itertools import product
from torch.testing._internal.common_utils import TestCase, parametrize, run_tests, TEST_CUDA, NoTest
from torch.testing._internal.common_dtype import all_types_and_complex_and
from torch.testing._internal.common_device_type import (
skipCUDAIfRocm, skipCUDAIf, instantiate_device_type_tests, dtypes, toleranceOverride, tol)
from torch.testing._internal.common_cuda import _get_torch_cuda_version
skipCUDAIfVersionLessThan, instantiate_device_type_tests, dtypes, toleranceOverride, tol)
if not TEST_CUDA:
print('CUDA not available, skipping tests', file=sys.stderr)
@@ -40,10 +39,10 @@ class TestPythonJiterator(TestCase):
self.assertEqual(expected, result)
@skipCUDAIfRocm
# See https://github.com/pytorch/pytorch/pull/76394#issuecomment-1118018287 for details
@skipCUDAIf(_get_torch_cuda_version() < (11, 6), "On cuda 11.3, nvrtcCompileProgram is taking too long to "
"compile jiterator generated kernels for non-contiguous input that requires dynamic-casting.")
# On cuda 11.3, nvrtcCompileProgram is taking too long to
# compile jiterator generated kernels for non-contiguous input that requires dynamic-casting.
@skipCUDAIfVersionLessThan((11, 6))
@parametrize("shape_strides", [
(([3, 3], [1, 3]), ([3, 1], [1, 3])), # non-contiguous
])