Remove test conditions for CUDA<12 (#163495)
These conditions are no longer needed because PyTorch now requires CUDA >= 12.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/163495
Approved by: https://github.com/janeyx99
Committed by: PyTorch MergeBot
Parent: 8d81564df5
Commit: 5d749ceb92
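For context, the kind of version gating this commit deletes looked roughly like the sketch below. It is an illustration, not code from the diff: the test class and method names are hypothetical, while _get_torch_cuda_version and TEST_CUDA are the real helpers from torch.testing._internal.common_cuda (the version helper returns the build's CUDA version as a tuple such as (12, 4)).

# Sketch of the now-obsolete CUDA < 12 gating pattern (illustrative only).
import unittest

import torch
from torch.testing._internal.common_cuda import _get_torch_cuda_version, TEST_CUDA


class ExampleCudaTest(unittest.TestCase):  # hypothetical class, for illustration
    @unittest.skipIf(not TEST_CUDA, "CUDA not available")
    def test_version_gated_behavior(self):
        if _get_torch_cuda_version() >= (12, 0):
            # CUDA 12 code path -- the only path that can run today
            self.assertTrue(torch.cuda.is_available())
        else:
            # CUDA 11 fallback -- dead code now that PyTorch requires CUDA >= 12
            self.skipTest("CUDA < 12 is no longer supported")


if __name__ == "__main__":
    unittest.main()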
@@ -4,11 +4,7 @@ import sys
 
 import unittest
 
 import torch
 
-from torch.testing._internal.common_cuda import (
-    _get_torch_cuda_version,
-    TEST_CUDA,
-    TEST_MULTIGPU,
-)
+from torch.testing._internal.common_cuda import TEST_CUDA, TEST_MULTIGPU
 from torch.testing._internal.common_utils import NoTest, run_tests, TestCase
@@ -41,12 +37,8 @@ class TestCudaPrimaryCtx(TestCase):
         # Initially, we should not have any context on device 0.
         self.assertFalse(torch._C._cuda_hasPrimaryContext(0))
         torch.cuda.set_device(0)
-        if _get_torch_cuda_version() >= (12, 0):
-            # Now after the device was set, the context should present in CUDA 12.
-            self.assertTrue(torch._C._cuda_hasPrimaryContext(0))
-        else:
-            # In CUDA 11 the context should not be created.
-            self.assertFalse(torch._C._cuda_hasPrimaryContext(0))
+        # Now after the device was set, the context should present in CUDA 12.
+        self.assertTrue(torch._C._cuda_hasPrimaryContext(0))
 
     @unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
     def test_str_repr(self):
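The assertions above use the private helper torch._C._cuda_hasPrimaryContext. A standalone sketch of the behaviour the updated test expects (assumes a CUDA 12 build, at least one GPU, and that nothing has touched the GPU before this runs):

import torch

if torch.cuda.is_available():
    # No primary context exists yet (holds only if no prior CUDA work was done).
    print("context before set_device:", torch._C._cuda_hasPrimaryContext(0))
    torch.cuda.set_device(0)
    # On CUDA 12 builds, setting the device eagerly creates the primary context.
    print("context after set_device:", torch._C._cuda_hasPrimaryContext(0))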
@@ -605,15 +605,6 @@ class TestLinalg(TestCase):
         with self.assertRaisesRegex(RuntimeError, r'parameter `driver` should be one of \(gels, gelsy, gelsd, gelss\)'):
             torch.linalg.lstsq(a, b, driver='fictitious_driver')
 
-        # cuSOLVER path supports underdetermined systems
-        version = torch.testing._internal.common_cuda._get_torch_cuda_version()
-        cusolver_not_available = (version < (10, 1))
-
-        if device != 'cpu' and cusolver_not_available:
-            a = torch.rand(2, 3, dtype=dtype, device=device)
-            b = torch.rand(2, 1, dtype=dtype, device=device)
-            with self.assertRaisesRegex(RuntimeError, r'only overdetermined systems'):
-                torch.linalg.lstsq(a, b)
 
     @skipCUDAIfNoMagma
     @skipCPUIfNoLapack
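The deleted block targeted builds without cuSOLVER, where CUDA lstsq only accepted overdetermined systems. On the CUDA >= 12 builds that remain, the underdetermined solve the old branch expected to fail is a normal call; a small sketch (shapes are illustrative):

import torch

device = "cuda" if torch.cuda.is_available() else "cpu"
a = torch.rand(2, 3, device=device)  # 2 equations, 3 unknowns: underdetermined
b = torch.rand(2, 1, device=device)
solution = torch.linalg.lstsq(a, b).solution
print(solution.shape)  # torch.Size([3, 1])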
@@ -7354,7 +7345,7 @@ scipy_lobpcg | {eq_err_scipy:10.2e} | {eq_err_general_scipy:10.2e} | {iters2:
 
         if TEST_WITH_ROCM:
             _test(17, k, n, use_transpose_a, use_transpose_b, True)
-        elif version >= (11, 7):
+        else:
             if not use_transpose_a and use_transpose_b:
                 if SM80OrLater or (version >= (12, 3) and (SM70 or SM75)):
                     _test(17, k, n, use_transpose_a, use_transpose_b, version > (11, 7))
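The surviving branch keys off the GPU architecture (SM80OrLater, SM70, SM75) and the CUDA version rather than off a CUDA 11 cut-off. A sketch of how those two pieces of information can be queried directly (variable names here are illustrative):

import torch

if torch.cuda.is_available():
    major, minor = torch.cuda.get_device_capability()  # e.g. (8, 0) on A100
    sm80_or_later = (major, minor) >= (8, 0)
    cuda_version = tuple(int(p) for p in torch.version.cuda.split("."))  # e.g. (12, 4)
    print(f"SM{major}{minor}, sm80_or_later={sm80_or_later}, CUDA={cuda_version}")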
@@ -7380,16 +7371,12 @@ scipy_lobpcg | {eq_err_scipy:10.2e} | {eq_err_general_scipy:10.2e} | {iters2:
             with self.assertRaisesRegex(RuntimeError,
                                         "CUDA error: CUBLAS_STATUS_NOT_SUPPORTED when calling cublasLtMatmul"):
                 _test(17, k, n, use_transpose_a, use_transpose_b)
-        else:
-            with self.assertRaisesRegex(RuntimeError, "_int_mm_out_cuda not compiled for CUDA"):
-                _test(17, k, n, use_transpose_a, use_transpose_b, False)
 
     @unittest.skipIf(IS_WINDOWS, "Skipped on Windows!")
     @unittest.skipIf(IS_FBCODE and IS_REMOTE_GPU, "cublas runtime error")
     @skipCUDAIfRocmVersionLessThan((6, 0))
     @onlyCUDA
     def test__int_mm_errors(self, device):
-        version = _get_torch_cuda_version()
 
         def genf_int(x, y):
             return torch.empty((x, y), dtype=torch.int8, device=device)
@@ -20,7 +20,7 @@ from torch.testing._internal.common_device_type import \
     largeTensorTest)
 from torch.testing._internal.common_methods_invocations import \
     (op_db, sparse_csr_unary_ufuncs, ReductionOpInfo)
-from torch.testing._internal.common_cuda import _get_torch_cuda_version, TEST_CUDA
+from torch.testing._internal.common_cuda import TEST_CUDA
 from torch.testing._internal.common_dtype import (
     floating_types, all_types_and_complex_and, floating_and_complex_types, floating_types_and,
     all_types_and_complex, floating_and_complex_types_and)
@@ -40,23 +40,11 @@ load_tests = load_tests
 
 no_mkl_sparse = IS_WINDOWS or not TEST_MKL
 
-def _check_cusparse_triangular_solve_available():
-    version = _get_torch_cuda_version()
-    # cusparseSpSM was added in 11.3.1 but we don't have access to patch version
-    min_supported_version = (11, 4)
-    return version >= min_supported_version
-
 def _check_cusparse_spgemm_available():
     # cusparseSpGEMM was added in 11.0
     return not TEST_WITH_ROCM
 
-def _check_cusparse_sddmm_available():
-    if TEST_WITH_ROCM:
-        return True
-    version = _get_torch_cuda_version()
-    # cusparseSDDMM was added in 11.2.1 but we don't have access to patch version
-    min_supported_version = (11, 3)
-    return version >= min_supported_version
-
 _sparse_csr_ops = list(filter(lambda op: op.supports_sparse_csr, op_db))
 _sparse_compressed_ops = list(filter(lambda op: (op.supports_sparse_csr or op.supports_sparse_csc
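The helper that survives (_check_cusparse_spgemm_available) is consumed through the @skipCUDAIf decorator from torch.testing._internal.common_device_type, which is the same mechanism the removed checks used. A hedged sketch of that pattern; ExampleSparseTest and its test body are made up, the imports and decorators are the real ones:

import torch
from torch.testing._internal.common_device_type import (
    instantiate_device_type_tests, onlyCUDA, skipCUDAIf)
from torch.testing._internal.common_utils import TEST_WITH_ROCM, TestCase, run_tests


def _check_cusparse_spgemm_available():
    # cuSparse SpGEMM ships with every supported CUDA toolkit; only ROCm lacks it.
    return not TEST_WITH_ROCM


class ExampleSparseTest(TestCase):  # hypothetical test class, for illustration
    @onlyCUDA
    @skipCUDAIf(not _check_cusparse_spgemm_available(), "cuSparse Generic API SpGEMM is not available")
    def test_csr_matmul(self, device):
        a = torch.eye(4, device=device).to_sparse_csr()
        b = torch.eye(4, device=device).to_sparse_csr()
        # CSR @ CSR goes through the cuSparse SpGEMM path on CUDA
        self.assertEqual((a @ b).to_dense(), torch.eye(4, device=device))


instantiate_device_type_tests(ExampleSparseTest, globals(), only_for="cuda")

if __name__ == "__main__":
    run_tests()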
@@ -2347,10 +2335,6 @@ class TestSparseCSR(TestCase):
             run_test(index_dtype)
 
     @skipCPUIfNoMklSparse
-    @skipCUDAIf(
-        not _check_cusparse_triangular_solve_available(),
-        "cuSparse Generic API SpSV is not available"
-    )
     @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
     @precisionOverride({torch.float32: 1e-3, torch.complex64: 1e-3,
                         torch.float64: 1e-8, torch.complex128: 1e-8})
@@ -2427,10 +2411,6 @@ class TestSparseCSR(TestCase):
                 itertools.product([True, False], repeat=4)):
             run_test(n, k, upper, unitriangular, transpose, zero)
 
-    @skipCUDAIf(
-        not _check_cusparse_sddmm_available(),
-        "cuSparse Generic API SDDMM is not available"
-    )
     @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
     @precisionOverride({torch.float32: 1e-3, torch.complex64: 1e-3,
                         torch.float64: 1e-8, torch.complex128: 1e-8})
@@ -2481,10 +2461,6 @@ class TestSparseCSR(TestCase):
         for op_a, op_b in itertools.product([True, False], repeat=2):
             run_test(c, a, b, op_a, op_b)
 
-    @skipCUDAIf(
-        not _check_cusparse_sddmm_available(),
-        "cuSparse Generic API SDDMM is not available"
-    )
     @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
     def test_sampled_addmm_autograd(self, device, dtype):
         from torch.testing._internal.common_methods_invocations import sample_inputs_sparse_sampled_addmm
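test_sampled_addmm_autograd above covers torch.sparse.sampled_addmm, which evaluates a dense matmul only at the positions present in a sparse CSR mask. A minimal usage sketch (values and shapes are illustrative):

import torch

if torch.cuda.is_available():
    device = "cuda"
    mask = torch.eye(3, device=device).to_sparse_csr()  # sparsity pattern to sample
    mat1 = torch.randn(3, 4, device=device)
    mat2 = torch.randn(4, 3, device=device)
    # result = beta * mask + alpha * (mat1 @ mat2), evaluated only on mask's pattern
    out = torch.sparse.sampled_addmm(mask, mat1, mat2, beta=1.0, alpha=1.0)
    print(out.to_dense())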
@@ -2514,10 +2490,6 @@ class TestSparseCSR(TestCase):
     @onlyCUDA
     # It works on ROCm and CUDA issue is currently active
     @skipCUDAIf(not TEST_WITH_ROCM, "Causes CUDA memory exception, see https://github.com/pytorch/pytorch/issues/72177")
-    @skipCUDAIf(
-        not _check_cusparse_sddmm_available(),
-        "cuSparse Generic API SDDMM is not available"
-    )
     @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
     @precisionOverride({torch.float32: 1e-3, torch.complex64: 1e-3,
                         torch.float64: 1e-8, torch.complex128: 1e-8})
@@ -2533,10 +2505,6 @@ class TestSparseCSR(TestCase):
             run_test(c, a, b)
 
     @onlyCUDA
-    @skipCUDAIf(
-        not _check_cusparse_sddmm_available(),
-        "cuSparse Generic API SDDMM is not available"
-    )
     @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128)
     def test_sampled_addmm_errors(self, device, dtype):
         # test that the errors are the same for dense and sparse sampled versions
@@ -2816,10 +2784,6 @@ class TestSparseCSR(TestCase):
             dense_output.backward(dense_covector)
             self.assertEqual(sparse_input.grad, dense_input.grad)
 
-    @skipCUDAIf(
-        not _check_cusparse_sddmm_available(),
-        "cuSparse Generic API SDDMM is not available"
-    )
     @dtypes(torch.float64)
     def test_autograd_dense_output_addmm(self, device, dtype):
         from torch.testing._internal.common_methods_invocations import sample_inputs_addmm
@@ -1240,11 +1240,8 @@ class TestSparseSemiStructuredCUSPARSELT(TestCase):
         version = _get_torch_cuda_version()
         assert torch.backends.cusparselt.is_available()
 
-        # CUDA 11.8 has cuSPARSELt v0.4.0 support
-        if version == (11, 8):
-            assert torch.backends.cusparselt.version() == 400
         # PyTorch CUDA 12.4+ using cuSPARSELt v0.6.2+
-        elif version >= (12, 4):
+        if version >= (12, 4):
             assert torch.backends.cusparselt.version() >= 602
         else:
             assert torch.backends.cusparselt.version() is None
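After this change the only version split left is CUDA >= 12.4 versus everything else. Querying the bundled cuSPARSELt from Python looks like this (a sketch; the integer encoding, e.g. 602 for v0.6.2, follows the assertions above):

import torch

if torch.backends.cusparselt.is_available():
    print("cuSPARSELt version:", torch.backends.cusparselt.version())  # e.g. 602 for v0.6.2
else:
    print("cuSPARSELt not available; version() returns:", torch.backends.cusparselt.version())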