[OpenReg] move fallback tests into test_openreg.py (#158441)

----

- move fallback tests into test_openreg
- remove the test_cpp_extensions_open_device_registration.py
Pull Request resolved: https://github.com/pytorch/pytorch/pull/158441
Approved by: https://github.com/albanD
ghstack dependencies: #158415, #158440
This commit is contained in:
FFFrog
2025-07-24 11:46:12 +08:00
committed by PyTorch MergeBot
parent b635359e4c
commit 4261e26a8b
5 changed files with 42 additions and 104 deletions

View File

@@ -1555,7 +1555,7 @@ test_executorch() {
test_linux_aarch64() { test_linux_aarch64() {
python test/run_test.py --include test_modules test_mkldnn test_mkldnn_fusion test_openmp test_torch test_dynamic_shapes \ python test/run_test.py --include test_modules test_mkldnn test_mkldnn_fusion test_openmp test_torch test_dynamic_shapes \
test_transformers test_multiprocessing test_numpy_interop test_autograd test_binary_ufuncs test_complex test_spectral_ops \ test_transformers test_multiprocessing test_numpy_interop test_autograd test_binary_ufuncs test_complex test_spectral_ops \
test_foreach test_reductions test_unary_ufuncs test_tensor_creation_ops test_ops test_cpp_extensions_open_device_registration \ test_foreach test_reductions test_unary_ufuncs test_tensor_creation_ops test_ops \
--shard "$SHARD_NUMBER" "$NUM_TEST_SHARDS" --verbose --shard "$SHARD_NUMBER" "$NUM_TEST_SHARDS" --verbose
# Dynamo tests # Dynamo tests

View File

@@ -188,7 +188,6 @@ S390X_BLOCKLIST = [
"onnx/test_utility_funs", "onnx/test_utility_funs",
"profiler/test_profiler", "profiler/test_profiler",
"test_ao_sparsity", "test_ao_sparsity",
"test_cpp_extensions_open_device_registration",
"test_jit", "test_jit",
"test_metal", "test_metal",
"test_mps", "test_mps",
@@ -271,7 +270,6 @@ XPU_TEST = [
RUN_PARALLEL_BLOCKLIST = [ RUN_PARALLEL_BLOCKLIST = [
"test_extension_utils", "test_extension_utils",
"test_cpp_extensions_jit", "test_cpp_extensions_jit",
"test_cpp_extensions_open_device_registration",
"test_cpp_extensions_stream_and_event", "test_cpp_extensions_stream_and_event",
"test_cpp_extensions_mtia_backend", "test_cpp_extensions_mtia_backend",
"test_jit_disabled", "test_jit_disabled",
@@ -1254,7 +1252,6 @@ CUSTOM_HANDLERS = {
"test_ci_sanity_check_fail": run_ci_sanity_check, "test_ci_sanity_check_fail": run_ci_sanity_check,
"test_autoload_enable": test_autoload_enable, "test_autoload_enable": test_autoload_enable,
"test_autoload_disable": test_autoload_disable, "test_autoload_disable": test_autoload_disable,
"test_cpp_extensions_open_device_registration": run_test_with_openreg,
"test_openreg": run_test_with_openreg, "test_openreg": run_test_with_openreg,
"test_transformers_privateuse1": run_test_with_openreg, "test_transformers_privateuse1": run_test_with_openreg,
} }

View File

@@ -1,99 +0,0 @@
# Owner(s): ["module: cpp-extensions"]
import os
import unittest
import torch_openreg # noqa: F401
import torch
import torch.testing._internal.common_utils as common
import torch.utils.cpp_extension
@unittest.skipIf(common.TEST_XPU, "XPU does not support cppextension currently")
@common.markDynamoStrictTest
class TestCppExtensionOpenRegistration(common.TestCase):
"""Tests Open Device Registration with C++ extensions."""
module = None
def setUp(self):
super().setUp()
# cpp extensions use relative paths. Those paths are relative to
# this file, so we'll change the working directory temporarily
self.old_working_dir = os.getcwd()
os.chdir(os.path.dirname(os.path.abspath(__file__)))
assert self.module is not None
def tearDown(self):
super().tearDown()
# return the working directory (see setUp)
os.chdir(self.old_working_dir)
@classmethod
def setUpClass(cls):
common.remove_cpp_extensions_build_root()
cls.module = torch.utils.cpp_extension.load(
name="custom_device_extension",
sources=[
"cpp_extensions/open_registration_extension.cpp",
],
extra_include_paths=["cpp_extensions"],
extra_cflags=["-g"],
verbose=True,
)
def test_open_device_scalar_type_fallback(self):
z_cpu = torch.Tensor([[0, 0, 0, 1, 1, 2], [0, 1, 2, 1, 2, 2]]).to(torch.int64)
z = torch.triu_indices(3, 3, device="openreg")
self.assertEqual(z_cpu, z)
def test_open_device_tensor_type_fallback(self):
# create tensors located in custom device
x = torch.Tensor([[1, 2, 3], [2, 3, 4]]).to("openreg")
y = torch.Tensor([1, 0, 2]).to("openreg")
# create result tensor located in cpu
z_cpu = torch.Tensor([[0, 2, 1], [1, 3, 2]])
# Check that our device is correct.
device = self.module.custom_device()
self.assertTrue(x.device == device)
self.assertFalse(x.is_cpu)
# call sub op, which will fallback to cpu
z = torch.sub(x, y)
self.assertEqual(z_cpu, z)
# call index op, which will fallback to cpu
z_cpu = torch.Tensor([3, 1])
y = torch.Tensor([1, 0]).long().to("openreg")
z = x[y, y]
self.assertEqual(z_cpu, z)
def test_open_device_tensorlist_type_fallback(self):
# create tensors located in custom device
v_openreg = torch.Tensor([1, 2, 3]).to("openreg")
# create result tensor located in cpu
z_cpu = torch.Tensor([2, 4, 6])
# create tensorlist for foreach_add op
x = (v_openreg, v_openreg)
y = (v_openreg, v_openreg)
# Check that our device is correct.
device = self.module.custom_device()
self.assertTrue(v_openreg.device == device)
self.assertFalse(v_openreg.is_cpu)
# call _foreach_add op, which will fallback to cpu
z = torch._foreach_add(x, y)
self.assertEqual(z_cpu, z[0])
self.assertEqual(z_cpu, z[1])
# call _fused_adamw_ with undefined tensor.
self.module.fallback_with_undefined_tensor()
if __name__ == "__main__":
common.run_tests()

View File

@@ -580,6 +580,47 @@ class TestOpenReg(TestCase):
x_out = x_out.to("cpu") x_out = x_out.to("cpu")
self.assertEqual(x_in, x_out) self.assertEqual(x_in, x_out)
# fallback
def test_scalar_type_fallback(self):
x_cpu = torch.Tensor([[0, 0, 0, 1, 1, 2], [0, 1, 2, 1, 2, 2]]).to(torch.int64)
x = torch.triu_indices(3, 3, device="openreg")
self.assertEqual(x_cpu, x)
def test_tensor_type_fallback(self):
x = torch.Tensor([[1, 2, 3], [2, 3, 4]]).to("openreg")
y = torch.Tensor([1, 0, 2]).to("openreg")
self.assertTrue(x.device.type, "openreg")
self.assertFalse(x.is_cpu)
z_cpu = torch.Tensor([[0, 2, 1], [1, 3, 2]])
# call sub op, which will fallback to cpu
z = torch.sub(x, y)
self.assertEqual(z_cpu, z)
# call index op, which will fallback to cpu
z_cpu = torch.Tensor([3, 1])
y = torch.Tensor([1, 0]).long().to("openreg")
z = x[y, y]
self.assertEqual(z_cpu, z)
def test_tensorlist_type_fallback(self):
# create tensors located in custom device
v_openreg = torch.Tensor([1, 2, 3]).to("openreg")
# create result tensor located in cpu
z_cpu = torch.Tensor([2, 4, 6])
# create tensorlist for foreach_add op
x = (v_openreg, v_openreg)
y = (v_openreg, v_openreg)
# Check that our device is correct.
self.assertTrue(v_openreg.device.type == "openreg")
self.assertFalse(v_openreg.is_cpu)
# call _foreach_add op, which will fallback to cpu
z = torch._foreach_add(x, y)
self.assertEqual(z_cpu, z[0])
self.assertEqual(z_cpu, z[1])
if __name__ == "__main__": if __name__ == "__main__":
run_tests() run_tests()

View File

@@ -23,7 +23,6 @@ TARGET_DET_LIST = [
"test_cpp_extensions_aot_ninja", "test_cpp_extensions_aot_ninja",
"test_cpp_extensions_aot_no_ninja", "test_cpp_extensions_aot_no_ninja",
"test_cpp_extensions_jit", "test_cpp_extensions_jit",
"test_cpp_extensions_open_device_registration",
"test_cpp_extensions_stream_and_event", "test_cpp_extensions_stream_and_event",
"test_cpp_extensions_mtia_backend", "test_cpp_extensions_mtia_backend",
"test_cuda", "test_cuda",