[OpenReg] Strengthen OpenReg's execution limits to minimize the waste of computing resources (#161918)

OpenReg currently supports Linux, Windows, and macOS, and is stable and straightforward to integrate with third-party devices on all three platforms. It also does not depend on any other accelerator (such as CUDA or MPS). To avoid wasting computing resources, `test_openreg` can therefore be added to the relevant BLOCKLISTS so that it is skipped on configurations where it adds no coverage, limiting OpenReg's execution to only the scenarios that actually need it.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/161918
Approved by: https://github.com/albanD
ghstack dependencies: #161917
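For context, the blocklists mentioned above live in PyTorch's test runner, which filters out whole test modules per CI configuration. The sketch below illustrates that mechanism in miniature; the list name `XPU_BLOCKLIST` and the `should_run` helper are illustrative assumptions, not the exact identifiers touched by this PR.

```python
# Minimal, hypothetical sketch of a runner-level blocklist; XPU_BLOCKLIST and
# should_run() are illustrative names, not the exact code changed by this PR.

XPU_BLOCKLIST = [
    "test_openreg",  # OpenReg needs no real accelerator, so skip it on XPU jobs
    # ... other test modules excluded on XPU ...
]


def should_run(test_module: str, on_xpu: bool) -> bool:
    """Return False when a test module is blocklisted for the current job."""
    if on_xpu and test_module in XPU_BLOCKLIST:
        return False
    return True


# On an XPU job the module is filtered out before any of its tests execute.
assert should_run("test_openreg", on_xpu=False)
assert not should_run("test_openreg", on_xpu=True)
```

Once the module is filtered at the runner level, the per-test XPU guards inside the test file (`skipIfXpu`, `unittest.skipIf(TEST_XPU, ...)`) become redundant, which is what the diff below removes.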
```diff
@@ -2,18 +2,11 @@
 import collections
 import functools
-import unittest
 
 import torch
 from torch.nn.attention import SDPBackend
 from torch.testing._internal.common_nn import NNTestCase
-from torch.testing._internal.common_utils import (
-    run_tests,
-    skipIfTorchDynamo,
-    skipIfXpu,
-    TEST_XPU,
-    TestCase,
-)
+from torch.testing._internal.common_utils import run_tests, skipIfTorchDynamo, TestCase
 
 
 SDPAShape = collections.namedtuple(
@@ -123,7 +116,6 @@ class TestSTUB(TestCase):
 
 
 class TestQuantization(TestCase):
-    @skipIfXpu(msg="missing kernel for openreg")
     def test_quantize(self):
         x = torch.randn(3, 4, 5, dtype=torch.float32, device="openreg")
         quantized_tensor = torch.quantize_per_tensor(x, 0.1, 10, torch.qint8)
@@ -206,7 +198,6 @@ class TestFallback(TestCase):
         self.assertEqual(z_cpu, z[1])
 
 
-@unittest.skipIf(TEST_XPU, "XPU does not support cppextension currently")
 class TestSDPA(NNTestCase):
     @skipIfTorchDynamo()
     def test_fused_sdp_choice_privateuseone(self):
```