Disable failing test_int8_woq_mm_concat_cuda on slow grad check (#165331)

Same as https://github.com/pytorch/pytorch/pull/165147; I missed some tests in that PR, so this disables them as well.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/165331
Approved by: https://github.com/bbeckca
This commit is contained in:
Catherine Lee
2025-10-13 17:07:57 +00:00
committed by PyTorch MergeBot
parent 4e420415e8
commit c86a7c5f5e

View File

@ -138,6 +138,7 @@ class TestSelectAlgorithmCuda(BaseTestSelectAlgorithm):
@parametrize("in_features", (128,))
@parametrize("out_features", (64,))
@unittest.skipIf(not TEST_CUDA, "CUDA not available")
@unittest.skipIf(TEST_WITH_SLOW_GRADCHECK, "Leaking memory")
def test_int8_woq_mm_concat_cuda(
self, dtype, batch_size, mid_dim, in_features, out_features
):