Further fix failing tests in test/inductor/test_analysis.py (#160070)

This is a follow-up to #159800, as other tests are still failing.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/160070
Approved by: https://github.com/aorenste
Author: Aidyn-A
Date: 2025-08-07 17:32:58 +00:00
Committed by: PyTorch MergeBot
Parent: 8399cf88ce
Commit: 0bd3af4fb8

test/inductor/test_analysis.py

@@ -337,6 +337,7 @@ class TestAnalysis(TestCase):
         ],
     )
     @skipIf(not IS_BIG_GPU, "we can't use Triton only as a backend for max autotune")
+    @torch._inductor.config.patch(force_disable_caches=True)
     def test_triton_has_metadata(self, device, dtype, maxat):
         """
         make sure that the chrome trace of triton kernels contains certain values
@@ -359,7 +360,6 @@ class TestAnalysis(TestCase):
             options={
                 "benchmark_kernel": True,
                 "max_autotune_gemm_backends": backends,
-                "force_disable_caches": True,
                 "max_autotune": max_autotune,
             },
         )
@@ -507,6 +507,7 @@ class TestAnalysis(TestCase):
     @unittest.skipIf(
         not IS_BIG_GPU, "we can't use Triton only as a backend for max autotune"
     )
+    @torch._inductor.config.patch(force_disable_caches=True)
     def test_pointwise_bandwidth(self, device, dtype, maxat):
         # this tests to see if we can only use a Triton backend for max autotune
         max_autotune, backends = maxat
@@ -518,7 +519,6 @@ class TestAnalysis(TestCase):
             options={
                 "benchmark_kernel": True,
                 "max_autotune_gemm_backends": backends,
-                "force_disable_caches": True,
                 "max_autotune": max_autotune,
             },
         )
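
For context, the fix replaces a per-call compile option with a test-scoped config patch: torch._inductor.config.patch can be used as a decorator, setting force_disable_caches=True before the test body runs and restoring the previous value afterwards, so every torch.compile call inside the test sees the flag. Below is a minimal sketch of the pattern, assuming a recent PyTorch build; the test class and method names here are hypothetical, not part of the commit.

    import unittest

    import torch
    import torch._inductor.config


    class ExampleTest(unittest.TestCase):
        # config.patch as a decorator: force_disable_caches is flipped to True
        # on entry and restored to its previous value on exit, so the compile
        # below runs fresh instead of hitting inductor's caches.
        @torch._inductor.config.patch(force_disable_caches=True)
        def test_fresh_compile(self):
            compiled = torch.compile(
                lambda x: x * 2,
                options={"benchmark_kernel": True},  # same option style as the tests above
            )
            out = compiled(torch.ones(4))
            self.assertTrue(torch.equal(out, torch.full((4,), 2.0)))


    if __name__ == "__main__":
        unittest.main()

Compared with passing "force_disable_caches": True through the options dict, the decorator keeps the setting in one place and covers every compilation the test triggers, which is presumably why the commit hoists it out of options.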