Revert "Add Triton CPU as an Inductor backend (#133408)"

This reverts commit 31c0467594c7c41c8e8ff1828bf01fa31fc4454f.

Reverted https://github.com/pytorch/pytorch/pull/133408 on behalf of https://github.com/int3 due to internal tests failing ([comment](https://github.com/pytorch/pytorch/pull/133408#issuecomment-2379692517))
PyTorch MergeBot
2024-09-27 16:54:27 +00:00
parent 17f396b0b4
commit 36428f91e9
34 changed files with 258 additions and 455 deletions
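For context, here is a minimal, self-contained sketch (not taken from this commit; the class and test names are invented for illustration) of the two skip-guard styles that the hunks below toggle between: PR #133408 switched these sparse tests to the Inductor HAS_GPU flag, and this revert restores the has_triton() check.

# Illustrative sketch only -- not part of the revert diff below.
import unittest

import torch
from torch.testing._internal.common_utils import TestCase, run_tests
from torch.testing._internal.inductor_utils import HAS_GPU  # guard introduced by #133408
from torch.utils._triton import has_triton  # guard restored by this revert


class ExampleGuards(TestCase):
    @unittest.skipIf(not HAS_GPU, "Inductor+gpu needs triton and recent GPU arch")
    def test_guarded_by_has_gpu(self):
        # Trivial body; the decorator is the point of the example.
        self.assertEqual(torch.ones(2).sum().item(), 2.0)

    @unittest.skipIf(not has_triton(), "Test needs triton and recent GPU arch")
    def test_guarded_by_has_triton(self):
        self.assertEqual(torch.zeros(3).sum().item(), 0.0)


if __name__ == "__main__":
    run_tests()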


@@ -38,10 +38,10 @@ from torch.testing._internal.common_utils import (
     IS_WINDOWS,
 )
-from torch.testing._internal.inductor_utils import HAS_GPU
 import pytest
+from torch.utils._triton import has_triton
 SEMI_STRUCTURED_SUPPORTED_BACKENDS = dict()
 _IS_SM8X = False
@@ -981,7 +981,7 @@ class TestSparseSemiStructuredCUTLASS(TestCase):
         torch.backends.cuda.matmul.allow_tf32 = orig
-    @unittest.skipIf(not HAS_GPU, "Inductor+gpu needs triton and recent GPU arch")
+    @unittest.skipIf(not has_triton(), "Test needs triton and recent GPU arch")
     @inference_dtypes
     def test_conversions(self, device, dtype):
@@ -1009,7 +1009,7 @@ class TestSparseSemiStructuredCUTLASS(TestCase):
         for r, c in shapes:
             run_test(r, c, device, dtype)
-    @unittest.skipIf(not HAS_GPU, "Inductor+gpu needs triton and recent GPU arch")
+    @unittest.skipIf(not has_triton(), "Test needs triton and recent GPU arch")
     @inference_dtypes
     def test_conversions_all_patterns(self, device, dtype):
         r, c = 32, 128