[ROCm] Enabling additional UTs on ROCm (#115738)

Unskips tests on ROCm, mostly dynamo/inductor UTs, by removing `skipIfRocm` decorators.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/115738
Approved by: https://github.com/jithunnair-amd, https://github.com/malfet
Author: Jack Taylor
Date: 2024-01-09 08:36:07 +00:00
Committed by: PyTorch MergeBot
Parent: f0bbc2fcf5
Commit: db79ceb110
7 changed files with 4 additions and 22 deletions
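
For context, `skipIfRocm` (from torch.testing._internal.common_utils) skips a test whenever the suite runs on the ROCm stack, so deleting the decorator re-enables the test there. Below is a minimal sketch of how such a decorator works, assuming it keys off the PYTORCH_TEST_WITH_ROCM environment variable that PyTorch's test harness uses; the real implementation differs in detail (custom messages, class-level application).

# Minimal sketch of a skipIfRocm-style decorator; a hypothetical
# simplification, not PyTorch's actual implementation.
import os
import unittest
from functools import wraps

# PyTorch's ROCm CI sets PYTORCH_TEST_WITH_ROCM=1.
TEST_WITH_ROCM = os.getenv("PYTORCH_TEST_WITH_ROCM", "0") == "1"

def skip_if_rocm(fn):
    @wraps(fn)
    def wrapper(*args, **kwargs):
        if TEST_WITH_ROCM:
            # Raising SkipTest marks the test as skipped rather than failed.
            raise unittest.SkipTest("test doesn't currently work on the ROCm stack")
        return fn(*args, **kwargs)
    return wrapper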


@@ -3462,7 +3462,6 @@ class TestSparseCompressedTritonKernels(TestCase):
         return d
 
     @onlyCUDA
-    @skipIfRocm
     @dtypes(torch.half, torch.bfloat16, torch.float)
     @dtypesIfCUDA(torch.half, *[torch.bfloat16] if SM80OrLater else [], torch.float)
     @unittest.skipIf(IS_FBCODE and IS_REMOTE_GPU, "Test requires Triton")
@@ -3498,7 +3497,6 @@ class TestSparseCompressedTritonKernels(TestCase):
     @parametrize("block_size", [16, 32, 64])
     @parametrize("index_dtype", [torch.int32, torch.int64])
     @onlyCUDA
-    @skipIfRocm
     @dtypes(torch.half, torch.bfloat16, torch.float)
     @dtypesIfCUDA(torch.half, *[torch.bfloat16] if SM80OrLater else [], torch.float)
     @unittest.skipIf((not TEST_WITH_TORCHINDUCTOR) or (IS_FBCODE and IS_REMOTE_GPU) or torch._running_with_deploy(),
@@ -3577,7 +3575,6 @@ class TestSparseCompressedTritonKernels(TestCase):
         self.assertEqual(res_tri, res_dense)
 
     @onlyCUDA
-    @skipIfRocm
     @dtypes(torch.half)
     @unittest.skipIf(IS_FBCODE and IS_REMOTE_GPU or torch._running_with_deploy(),
                      "Skipped for deploy and internal with remote GPUs")
@@ -3787,7 +3784,6 @@ class TestSparseCompressedTritonKernels(TestCase):
     @parametrize("blocksize", [2, '2x3', 16, '16x32', 32, 64])
     @onlyCUDA
-    @skipIfRocm
     @dtypes(torch.half, torch.bfloat16, torch.float)
     @dtypesIfCUDA(torch.half, *[torch.bfloat16] if SM80OrLater else [], torch.float)
     @unittest.skipIf(IS_FBCODE and IS_REMOTE_GPU, "Test requires Triton")
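
With the decorators removed, these tests now run under ROCm. A hypothetical local invocation, assuming the hunks come from test/test_sparse_csr.py (the file is not named in the excerpt above):

# Hypothetical; file path and environment variable usage assumed.
PYTORCH_TEST_WITH_ROCM=1 python test/test_sparse_csr.py -k TestSparseCompressedTritonKernels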