[CI] Disable some tests that are failing in periodic (#150059)

Disabling some tests to restore the periodic workflow.

nogpu avx512 timeout:
59f14d19ae (38492953496-box)

profiler failure: 7ae0ce6360 (38461255009-box)

test_accelerator failure:
87bfd66c3c (39476723746-box)
origin: 146098

test_overrides failure:
bf752c36da (39484562957-box)
origin: 146098

inductor cpu repro:
bb9c426024 (38447525659-box)

functorch eager transforms:
8f858e226b (39488068620-box)
f2cea01f71 (39555064878)
b5281a4a18 (39599355600)
either 148288 or 148261?

2ec9aceaeb/1

Pull Request resolved: https://github.com/pytorch/pytorch/pull/150059
Approved by: https://github.com/ZainRizvi, https://github.com/atalman, https://github.com/malfet
Author: Catherine Lee
Date: 2025-03-28 20:31:32 +00:00
Committed by: PyTorch MergeBot
Parent: 2bd5bfa3ce
Commit: 9092dd2e82
6 changed files with 48 additions and 2 deletions
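Every hunk below applies the same guard: a unittest.skipIf on TEST_CUDA_MEM_LEAK_CHECK, which torch.testing._internal.common_utils derives from the PYTORCH_TEST_CUDA_MEM_LEAK_CHECK environment variable. A minimal sketch of the pattern, assuming the standard PyTorch test harness (the test class and body are hypothetical, for illustration only; they are not part of this PR):

    # Run with PYTORCH_TEST_CUDA_MEM_LEAK_CHECK=1 to see the skip take effect.
    import unittest

    from torch.testing._internal.common_utils import (
        TEST_CUDA_MEM_LEAK_CHECK,
        TestCase,
        run_tests,
    )


    class ExampleTest(TestCase):  # hypothetical class for illustration
        @unittest.skipIf(
            TEST_CUDA_MEM_LEAK_CHECK,
            "Leaking memory, see https://github.com/pytorch/pytorch/pull/150059 for example",
        )
        def test_known_leaky(self):
            # Placeholder body; the real tests exercise torch.func.linearize
            # and torch.compile + vmap, which leaked under the CUDA leak checker.
            self.assertTrue(True)


    if __name__ == "__main__":
        run_tests()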


@@ -74,6 +74,7 @@ from torch.testing._internal.common_utils import (
     skipIfRocm,
     skipIfTorchDynamo,
     subtest,
+    TEST_CUDA_MEM_LEAK_CHECK,
     TEST_WITH_TORCHDYNAMO,
     TestCase,
     xfailIfTorchDynamo,
@@ -2865,6 +2866,10 @@ class TestLinearize(TestCase):
         self.assertEqual(actual_jvp, expected_jvp)

     @dtypes(torch.float)
+    @unittest.skipIf(
+        TEST_CUDA_MEM_LEAK_CHECK,
+        "Leaking memory, see https://github.com/pytorch/pytorch/pull/150059 for example",
+    )
     def test_linearize_return(self, device, dtype):
         x_p = make_tensor((3, 1), device=device, dtype=dtype)
         x_t = make_tensor((3, 1), device=device, dtype=dtype)
@@ -2879,6 +2884,10 @@ class TestLinearize(TestCase):
         self.assertEqual(actual_jvp, expected_jvp)

     @dtypes(torch.float)
+    @unittest.skipIf(
+        TEST_CUDA_MEM_LEAK_CHECK,
+        "Leaking memory, see https://github.com/pytorch/pytorch/pull/150059 for example",
+    )
     def test_linearize_composition_vmap(self, device, dtype):
         x_p = make_tensor((3, 1), device=device, dtype=dtype)
         x_t = make_tensor((3, 3, 1), device=device, dtype=dtype)
@@ -2897,6 +2906,10 @@ class TestLinearize(TestCase):
         self.assertEqual(actual_batched_jvp, expected_batched_jvp)

     @dtypes(torch.float)
+    @unittest.skipIf(
+        TEST_CUDA_MEM_LEAK_CHECK,
+        "Leaking memory, see https://github.com/pytorch/pytorch/pull/150059 for example",
+    )
     def test_linearize_composition_grad(self, device, dtype):
         x_p = make_tensor((3,), device=device, dtype=dtype)
         x_t = make_tensor((3,), device=device, dtype=dtype)
@@ -2916,6 +2929,10 @@ class TestLinearize(TestCase):
         self.assertEqual(actual_batched_jvp, expected_batched_jvp)

     @dtypes(torch.float)
+    @unittest.skipIf(
+        TEST_CUDA_MEM_LEAK_CHECK,
+        "Leaking memory, see https://github.com/pytorch/pytorch/pull/150059 for example",
+    )
     def test_linearize_nested_input_nested_output(self, device, dtype):
         x_p = make_tensor((3, 1), device=device, dtype=dtype)
         x_t = make_tensor((3, 1), device=device, dtype=dtype)
@@ -5151,6 +5168,10 @@ class TestCompileTransforms(TestCase):
     # torch.compile is not supported on Windows CUDA.
     # Triton only supports GPU with SM70 or later.
     @expectedFailureIf((IS_WINDOWS and TEST_CUDA) or (TEST_CUDA and not SM70OrLater))
+    @unittest.skipIf(
+        TEST_CUDA_MEM_LEAK_CHECK,
+        "Leaking memory, see https://github.com/pytorch/pytorch/pull/150059 for example",
+    )
     def test_compile_vmap_hessian(self, device):
         # The model and inputs are a smaller version
         # of code at benchmark repo: