From c86a7c5f5e87af583ac5baedbf45f01db21c7dbc Mon Sep 17 00:00:00 2001
From: Catherine Lee
Date: Mon, 13 Oct 2025 17:07:57 +0000
Subject: [PATCH] Disable failing test_int8_woq_mm_concat_cuda on slow grad check (#165331)

Same as https://github.com/pytorch/pytorch/pull/165147; I missed some tests there.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/165331
Approved by: https://github.com/bbeckca
---
 test/inductor/test_cuda_select_algorithm.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/test/inductor/test_cuda_select_algorithm.py b/test/inductor/test_cuda_select_algorithm.py
index f580aaa5a1da..7fd9fadc1ccc 100644
--- a/test/inductor/test_cuda_select_algorithm.py
+++ b/test/inductor/test_cuda_select_algorithm.py
@@ -138,6 +138,7 @@ class TestSelectAlgorithmCuda(BaseTestSelectAlgorithm):
     @parametrize("in_features", (128,))
     @parametrize("out_features", (64,))
     @unittest.skipIf(not TEST_CUDA, "CUDA not available")
+    @unittest.skipIf(TEST_WITH_SLOW_GRADCHECK, "Leaking memory")
     def test_int8_woq_mm_concat_cuda(
         self, dtype, batch_size, mid_dim, in_features, out_features
     ):