Remove old ROCm skip conditions in tests (#164058)

This PR removes test skip conditions that only applied to ROCm (HIP) versions below 3.5, along with the now-unused HIP_VERSION helper and the re import it relied on.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/164058
Approved by: https://github.com/kwen2501
Author: Yuanyuan Chen
Date: 2025-09-29 03:00:54 +00:00
Committed by: PyTorch MergeBot
Parent: 811c693c49
Commit: 352197c508

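For context, the guards being deleted keyed off HIP_VERSION, a float parsed from torch.version.hip. Below is a minimal, self-contained sketch of that parsing logic as it appears in the diff; the sample version strings are illustrative, not taken from this PR.

import re
from typing import Optional

def parse_hip_version(hip: Optional[str]) -> float:
    # Mirrors the HIP_VERSION expression this PR deletes: keep the leading
    # "major.minor" of the HIP version string, or fall back to 0.0 when
    # torch.version.hip is None (i.e. a CUDA build).
    return 0.0 if hip is None else float(re.search(r"^\d+\.\d+", hip)[0])

# "3.5.0" -> 3.5 (the "< 3.5" guard no longer fires), "6.2.4" -> 6.2,
# None -> 0.0 (harmless, since the skip also required TEST_WITH_ROCM).
for sample in ["3.5.0", "6.2.4", None]:
    print(sample, "->", parse_hip_version(sample))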

@@ -1,6 +1,5 @@
 # Owner(s): ["oncall: distributed"]

-import re
 import sys

 import torch
@@ -29,12 +28,6 @@ from torch.testing._internal.common_utils import (
 )

-HIP_VERSION = (
-    0.0
-    if torch.version.hip is None
-    else float(re.search(r"^\d+\.\d+", torch.version.hip)[0])
-)
-
 # load_tests from common_utils is used to automatically filter tests for
 # sharding on sandcastle. This line silences flake warnings
 load_tests = load_tests

@@ -66,9 +59,6 @@ class TestNCCL(TestCase):
         self.assertIsInstance(uid, bytes)
         self.assertGreater(len(uid), 1)

-    @skip_but_pass_in_sandcastle_if(
-        TEST_WITH_ROCM and HIP_VERSION < 3.5, "Skip NCCL tests for ROCm"
-    )
     @skip_but_pass_in_sandcastle_if(IS_WINDOWS, "NCCL doesn't support Windows")
     @skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "only one GPU detected")
     @dtypes(*broadcast_dtypes)
@@ -91,9 +81,6 @@ class TestNCCL(TestCase):
         for i in range(torch.cuda.device_count()):
             self.assertEqual(tensors[i], expected)

-    @skip_but_pass_in_sandcastle_if(
-        TEST_WITH_ROCM and HIP_VERSION < 3.5, "Skip NCCL tests for ROCm"
-    )
     @skip_but_pass_in_sandcastle_if(IS_WINDOWS, "NCCL doesn't support Windows")
     @skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "only one GPU detected")
     @dtypes(*datatypes)
@@ -118,10 +105,6 @@
     @skip_but_pass_in_sandcastle_if(IS_WINDOWS, "NCCL doesn't support Windows")
     @skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "only one GPU detected")
-    @skip_but_pass_in_sandcastle_if(
-        TEST_WITH_ROCM and HIP_VERSION < 3.5 and dtype == torch.bfloat16,  # noqa: F821
-        "Skip bfloat16 test for ROCm < 3.5",
-    )
     @dtypes(*datatypes)
     def test_all_reduce(self, device, dtype):
         cpu_tensors = [
@@ -151,9 +134,6 @@ class TestNCCL(TestCase):
         for tensor in tensors:
             self.assertEqual(tensor, expected)

-    @skip_but_pass_in_sandcastle_if(
-        TEST_WITH_ROCM and HIP_VERSION < 3.5, "Skip NCCL tests for ROCm"
-    )
     @skip_but_pass_in_sandcastle_if(IS_WINDOWS, "NCCL doesn't support Windows")
     def test_collective_errors(self, device):
         t = torch.rand(10).cuda(0)
@@ -182,9 +162,6 @@ class TestNCCL(TestCase):
         ):
             nccl.reduce_scatter(t, t)

-    @skip_but_pass_in_sandcastle_if(
-        TEST_WITH_ROCM and HIP_VERSION < 3.5, "Skip NCCL tests for ROCm"
-    )
     @skip_but_pass_in_sandcastle_if(IS_WINDOWS, "NCCL doesn't support Windows")
     @skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "only one GPU detected")
     @dtypes(*datatypes)
@@ -211,9 +188,6 @@ class TestNCCL(TestCase):
         for tensor in outputs:
             self.assertEqual(tensor, expected)

-    @skip_but_pass_in_sandcastle_if(
-        TEST_WITH_ROCM and HIP_VERSION < 3.5, "Skip NCCL tests for ROCm"
-    )
     @skip_but_pass_in_sandcastle_if(IS_WINDOWS, "NCCL doesn't support Windows")
     @skip_but_pass_in_sandcastle_if(not TEST_MULTIGPU, "only one GPU detected")
     @dtypes(*datatypes)