[dynamo] Remove all skipIfTorchDynamo in test_tensor_creation_ops.py (#154693)

Looks like they are no longer needed.
Pull Request resolved: https://github.com/pytorch/pytorch/pull/154693
Approved by: https://github.com/Skylion007, https://github.com/zou3519
Author: Ryan Guo
Date: 2025-05-30 13:50:42 -07:00
Committed by: PyTorch MergeBot
Parent: 984b1a80e3
Commit: 9d3ad82ca7


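For reference, below is a minimal sketch (not part of this commit; class and test names are made up and the bodies are placeholders) of how the two decorators touched by this diff are typically applied in the PyTorch test suite: skipIfTorchDynamo(...) skips a test entirely when the suite runs under TorchDynamo, while xfailIfTorchDynamo keeps it running but records it as an expected failure there, so a stale exclusion surfaces as an unexpected pass.

# Minimal sketch, not taken from test_tensor_creation_ops.py; names and
# bodies below are illustrative only.
import torch
from torch.testing._internal.common_utils import (
    TestCase,
    run_tests,
    skipIfTorchDynamo,
    xfailIfTorchDynamo,
)


class ExampleCreationTests(TestCase):
    # Skipped entirely when the suite runs under TorchDynamo.
    @skipIfTorchDynamo("TorchDynamo fails with unknown reason")
    def test_skipped_under_dynamo(self):
        self.assertEqual(torch.arange(3).tolist(), [0, 1, 2])

    # Still runs under TorchDynamo, but is recorded as an expected failure
    # there; if Dynamo later handles it, the unexpected pass is flagged.
    @xfailIfTorchDynamo
    def test_expected_failure_under_dynamo(self):
        # Placeholder body; in the real suite this asserts behavior that
        # currently differs under Dynamo (e.g. an error eager mode raises).
        self.assertEqual(torch.zeros(2).sum().item(), 0.0)


if __name__ == "__main__":
    run_tests()

The change at the bottom of this diff (test_numpy_scalars) follows the second pattern: a blanket skip becomes an xfail with a comment explaining why Dynamo behaves differently.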
@@ -33,7 +33,6 @@ from torch.testing._internal.common_utils import (
     IS_S390X,
     IS_ARM64,
     parametrize,
-    skipIfTorchDynamo,
     xfailIfTorchDynamo,
 )
 from torch.testing._internal.common_device_type import (
@@ -196,7 +195,6 @@ class TestTensorCreation(TestCase):
         self.assertEqual(x, torch.tensor([n] * numel, dtype=dt, device=device))
         self.assertEqual(dt, x.dtype)
 
-    @skipIfTorchDynamo("TorchDynamo fails with unknown reason")
     def test_roll(self, device):
         numbers = torch.arange(1, 9, device=device)
@@ -775,7 +773,6 @@ class TestTensorCreation(TestCase):
     # Note: This test failed on XLA since its test cases are created by empty_strided which
     # doesn't support overlapping sizes/strides in XLA impl
-    @skipIfTorchDynamo("TorchDynamo fails on this test for unknown reasons")
     @onlyNativeDeviceTypes
     def test_like_fn_stride_proparation_vs_tensoriterator_unary_op(self, device):
         # Test like functions against tensoriterator based unary operator (exp) to
@@ -1015,7 +1012,6 @@ class TestTensorCreation(TestCase):
             expected = np.dstack(np_input)
             self.assertEqual(actual, expected)
 
-    @skipIfTorchDynamo("TorchDynamo fails with unknown reason")
     @dtypes(torch.int32, torch.int64)
     def test_large_linspace(self, device, dtype):
         start = torch.iinfo(dtype).min
@@ -1284,7 +1280,6 @@ class TestTensorCreation(TestCase):
             torch.tensor(bad_mock_seq, device=device)
         self.assertEqual(torch.tensor([1.0, 2.0, 3.0], device=device), torch.tensor(good_mock_seq, device=device))
 
-    @skipIfTorchDynamo("Not a TorchDynamo suitable test")
     def test_simple_scalar_cast(self, device):
         ok = [torch.tensor([1.5], device=device), torch.zeros(1, 1, 1, 1, device=device)]
         ok_values = [1.5, 0]
@@ -1536,7 +1531,6 @@ class TestTensorCreation(TestCase):
         self.assertEqual(c1, expected)
         self.assertEqual(c2, expected)
 
-    @skipIfTorchDynamo("TorchDynamo fails with unknown reason")
     @skipMeta
     def test_linlogspace_mem_overlap(self, device):
         x = torch.rand(1, device=device).expand(10)
@@ -2327,7 +2321,6 @@ class TestTensorCreation(TestCase):
             self.assertNotEqual(torch.tensor(n, device='cuda'), n_astensor)
 
     # TODO: this test should be updated
-    @skipIfTorchDynamo("TorchDynamo fails with unknown reason")
     @suppress_warnings
     @dtypesIfCPU(torch.float, torch.bfloat16, torch.float16)
     @dtypes(torch.float)
@@ -2358,7 +2351,6 @@ class TestTensorCreation(TestCase):
         self.assertEqual(res1, res2, atol=0, rtol=0)
 
     # TODO: this test should be updated
-    @skipIfTorchDynamo("TorchDynamo fails with unknown reason")
     def test_range_warning(self, device):
         with warnings.catch_warnings(record=True) as w:
             torch.range(0, 10, device=device)
@@ -2678,13 +2670,11 @@ class TestTensorCreation(TestCase):
         for steps in [1, 2, 3, 5, 11, 256, 257, 2**22]:
             test_fn(torch.linspace, np.linspace, steps)
 
-    @skipIfTorchDynamo("TorchDynamo fails with unknown reason")
     @dtypes(torch.complex64)
     def test_linspace_vs_numpy_complex(self, device, dtype):
         self._test_linspace_logspace_complex_helper(torch.linspace, np.linspace,
                                                     device, dtype)
 
-    @skipIfTorchDynamo("TorchDynamo fails with unknown reason")
     @dtypes(torch.complex64)
     def test_logspace_vs_numpy_complex(self, device, dtype):
         self._test_linspace_logspace_complex_helper(torch.logspace, np.logspace,
@@ -2805,7 +2795,6 @@ class TestTensorCreation(TestCase):
     @precisionOverride({torch.bfloat16: 5e-2, torch.half: 1e-3})
     @unittest.skipIf(not TEST_SCIPY, "Scipy not found")
     @dtypesIfCUDA(torch.float, torch.double, torch.bfloat16, torch.half, torch.long)
-    @skipIfTorchDynamo("Not a TorchDynamo suitable test")
     @dtypes(torch.float, torch.double, torch.long)
     @parametrize("window", ['hann', 'hamming', 'bartlett', 'blackman'])
     def test_signal_window_functions(self, device, dtype, window):
@@ -2814,7 +2803,6 @@ class TestTensorCreation(TestCase):
     @onlyNativeDeviceTypes
     @precisionOverride({torch.bfloat16: 5e-2, torch.half: 1e-3})
     @unittest.skipIf(not TEST_SCIPY, "Scipy not found")
-    @skipIfTorchDynamo("TorchDynamo fails with unknown reason")
     @dtypesIfCUDA(torch.float, torch.double, torch.bfloat16, torch.half, torch.long)
     @dtypes(torch.float, torch.double, torch.long, torch.bfloat16, torch.float16)
     def test_kaiser_window(self, device, dtype):
@@ -2841,7 +2829,6 @@ class TestTensorCreation(TestCase):
     # torch.signal.windows functions (except any with extra parameters)
     @onlyNativeDeviceTypes
     @unittest.skipIf(not TEST_SCIPY, "Scipy not found")
-    @skipIfTorchDynamo("Not a TorchDynamo suitable test")
     @dtypes(torch.float, torch.double)
     @parametrize("window", ['bartlett', 'blackman', 'cosine', 'hamming', 'hann', 'nuttall'])
     def test_signal_windows_functions(self, device, dtype, window):
@@ -2850,7 +2837,6 @@ class TestTensorCreation(TestCase):
     # torch.signal.windows.kaiser
     @onlyNativeDeviceTypes
     @unittest.skipIf(not TEST_SCIPY, "Scipy not found")
-    @skipIfTorchDynamo("TorchDynamo fails with unknown reason")
     @dtypes(torch.float, torch.double)
     def test_kaiser(self, device, dtype):
         for num_test in range(50):
@@ -3034,12 +3020,10 @@ class TestTensorCreation(TestCase):
             self.assertEqual(fn(start, end, steps=100, device=device).dtype, dtype)
 
-    @skipIfTorchDynamo("TorchDynamo fails with unknown reason")
     def test_linspace_deduction(self, device):
         # Test deduction from input parameters.
         self._test_linspace_logspace_deduction_helper(torch.linspace, device)
 
-    @skipIfTorchDynamo("TorchDynamo fails with unknown reason")
     def test_logspace_deduction(self, device):
         # Test deduction from input parameters.
         self._test_linspace_logspace_deduction_helper(torch.logspace, device)
@@ -4179,7 +4163,8 @@ class TestAsArray(TestCase):
             t = torch.asarray(e)
             self.assertEqual(t, original)
 
-    @skipIfTorchDynamo()
+    # Dynamo changes numpy scalar to array, thus skips the asserted error.
+    @xfailIfTorchDynamo
     @onlyCPU
     def test_numpy_scalars(self, device):
         scalar = np.float64(0.5)