mirror of
https://github.com/pytorch/pytorch.git
synced 2025-10-20 12:54:11 +08:00
Disable failing test_int8_woq_mm_concat_cuda on slow grad check (#165331)
Same as https://github.com/pytorch/pytorch/pull/165147 — I missed some tests there.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/165331
Approved by: https://github.com/bbeckca
This commit is contained in:
committed by
PyTorch MergeBot
parent
4e420415e8
commit
c86a7c5f5e
@@ -138,6 +138,7 @@ class TestSelectAlgorithmCuda(BaseTestSelectAlgorithm):
     @parametrize("in_features", (128,))
     @parametrize("out_features", (64,))
     @unittest.skipIf(not TEST_CUDA, "CUDA not available")
+    @unittest.skipIf(TEST_WITH_SLOW_GRADCHECK, "Leaking memory")
     def test_int8_woq_mm_concat_cuda(
         self, dtype, batch_size, mid_dim, in_features, out_features
     ):
Reference in New Issue
Block a user