mirror of
https://github.com/pytorch/pytorch.git
synced 2025-10-20 12:54:11 +08:00
[Break XPU] Fix XPU UT failures introduced by community. (#156091)
Fixes #15089, Fixes #156063, Fixes #155689, Fixes #155692, Fixes #156146 Pull Request resolved: https://github.com/pytorch/pytorch/pull/156091 Approved by: https://github.com/jansel
This commit is contained in:
committed by
PyTorch MergeBot
parent
38e1e5d54c
commit
3dabc351bb
@@ -11,7 +11,7 @@ from torch._functorch._aot_autograd.autograd_cache import (
     BundledAOTAutogradCacheEntry,
 )
 from torch._inductor.test_case import TestCase as InductorTestCase
-from torch.testing._internal.inductor_utils import requires_triton
+from torch.testing._internal.inductor_utils import GPU_TYPE, requires_triton
 
 
 @functorch_config.patch({"enable_autograd_cache": True})
@@ -39,7 +39,7 @@ class PrecompileContextTests(InductorTestCase):
         compiled_fn = torch.compile(simple_function)
 
         # Run the compiled function
-        x = torch.randn(10, device="cuda", requires_grad=True)
+        x = torch.randn(10, device=GPU_TYPE, requires_grad=True)
         result = compiled_fn(x)
         result.sum().backward()
         # Check that PrecompileContext._new_cache_artifacts_by_key has length 1
@@ -80,7 +80,7 @@ class PrecompileContextTests(InductorTestCase):
         compiled_fn = torch.compile(simple_function)
 
         # Run the compiled function
-        x = torch.randn(10, device="cuda", requires_grad=True)
+        x = torch.randn(10, device=GPU_TYPE, requires_grad=True)
         result = compiled_fn(x)
         result.sum().backward()
         # Check that PrecompileContext._new_cache_artifacts_by_key has length 1
@@ -242,7 +242,6 @@ test_failures = {
     "test_pointwise_laguerre_polynomial_l_dynamic_shapes": TestFailure(("cuda", "xpu")),
     "test_pointwise_legendre_polynomial_p_dynamic_shapes": TestFailure(("cuda", "xpu")),
     "test_polar_dynamic_shapes": TestFailure(("cpu", "cuda", "xpu"), is_skip=True),
-    "test_randint_distribution_dynamic_shapes": TestFailure(("cuda", "xpu")),
     "test_randn_generator_dynamic_shapes": TestFailure(("cpu",)),
     "test_randn_like_empty_dynamic_shapes": TestFailure(("cpu", "cuda", "xpu")),
     "test_single_elem_dynamic_shapes": TestFailure(("cpu",)),
@@ -61,7 +61,6 @@ test_failures = {
     "test_AllenaiLongformerBase_repro_dynamic_shapes": TestFailure(
         ("cpu", "cuda", "xpu")
     ),
-    "test_randint_distribution_dynamic_shapes": TestFailure(("xpu",)),
 }
 if not torch._inductor.config.cpp_wrapper:
     test_failures["test_conv_inference_heuristics_dynamic_shapes"] = TestFailure(
@@ -13,6 +13,7 @@ from torch.testing._internal.common_utils import (
     IS_LINUX,
     run_tests,
     skipIfTorchDynamo,
+    skipIfXpu,
     TestCase,
 )
 
@@ -365,6 +366,7 @@ class TestOpenReg(TestCase):
         self.assertEqual(y.to(device="cpu"), torch.tensor([[1, 1], [2, 2], [3, 3]]))
         self.assertEqual(x.data_ptr(), y.data_ptr())
 
+    @skipIfXpu(msg="missing kernel for openreg")
     def test_quantize(self):
         x = torch.randn(3, 4, 5, dtype=torch.float32, device="openreg")
         quantized_tensor = torch.quantize_per_tensor(x, 0.1, 10, torch.qint8)
|
Reference in New Issue
Block a user