Revert "Enable more DTensor tests in local tensor mode and fix more integration issues (#165716)"

This reverts commit 1b397420f22b22f90a1093233ecd9167656e50cb.

Reverted https://github.com/pytorch/pytorch/pull/165716 on behalf of https://github.com/pytorch-auto-revert due to Reverted automatically by pytorch's autorevert, to avoid this behaviour add the tag autorevert: disable ([comment](https://github.com/pytorch/pytorch/pull/165716#issuecomment-3418083391))
This commit is contained in:
PyTorch MergeBot
2025-10-18 09:15:49 +00:00
parent 4740ce7787
commit beb6b62e8c
8 changed files with 25 additions and 150 deletions


@@ -211,14 +211,6 @@ def at_least_x_gpu(x):
     return False
 
 
-def _maybe_handle_skip_if_lt_x_gpu(args, msg) -> bool:
-    _handle_test_skip = getattr(args[0], "_handle_test_skip", None)
-    if len(args) == 0 or _handle_test_skip is None:
-        return False
-    _handle_test_skip(msg)
-    return True
-
-
 def skip_if_lt_x_gpu(x):
     def decorator(func):
         @wraps(func)
@@ -229,9 +221,7 @@ def skip_if_lt_x_gpu(x):
                 return func(*args, **kwargs)
             if TEST_XPU and torch.xpu.device_count() >= x:
                 return func(*args, **kwargs)
-            test_skip = TEST_SKIPS[f"multi-gpu-{x}"]
-            if _maybe_handle_skip_if_lt_x_gpu(args, test_skip.message):
-                sys.exit(test_skip.exit_code)
+            sys.exit(TEST_SKIPS[f"multi-gpu-{x}"].exit_code)
 
         return wrapper
 
@@ -247,9 +237,7 @@ def nccl_skip_if_lt_x_gpu(backend, x):
                 return func(*args, **kwargs)
             if torch.cuda.is_available() and torch.cuda.device_count() >= x:
                 return func(*args, **kwargs)
-            test_skip = TEST_SKIPS[f"multi-gpu-{x}"]
-            if _maybe_handle_skip_if_lt_x_gpu(args, test_skip.message):
-                sys.exit(test_skip.exit_code)
+            sys.exit(TEST_SKIPS[f"multi-gpu-{x}"].exit_code)
 
         return wrapper
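
For context, a minimal sketch of the skip-routing pattern these hunks remove: the reverted decorator first looked for a _handle_test_skip hook on the test instance and only fell back to exiting the process with the skip code. This is not the PyTorch source; FAKE_TEST_SKIPS, _gpu_count, and skip_if_lt_x_gpu_sketch are illustrative stand-ins for the real TEST_SKIPS table and device probes.

# Sketch of the reverted skip routing, under the assumptions named above.
import sys
from functools import wraps

FAKE_TEST_SKIPS = {"multi-gpu-2": ("need at least 2 GPUs", 75)}


def _gpu_count() -> int:
    return 0  # pretend no accelerators are visible


def skip_if_lt_x_gpu_sketch(x):
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            if _gpu_count() >= x:
                return func(*args, **kwargs)
            msg, exit_code = FAKE_TEST_SKIPS[f"multi-gpu-{x}"]
            handler = getattr(args[0], "_handle_test_skip", None) if args else None
            if handler is not None:
                # Behaviour being reverted: delegate the skip to the test object.
                return handler(msg)
            # Behaviour being restored: exit the worker with the skip exit code.
            sys.exit(exit_code)

        return wrapper

    return decorator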


@@ -701,9 +701,6 @@ class DTensorConverter:
 
 
 class LocalDTensorTestBase(DTensorTestBase):
-    def _handle_test_skip(self, msg: str) -> None:
-        self.skipTest(msg)
-
     def _get_local_tensor_mode(self):
         return LocalTensorMode(frozenset(range(self.world_size)))
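
A hedged usage sketch of why the removed _handle_test_skip hook existed: with _get_local_tensor_mode, all ranks presumably run inside a single process, so exiting the process to signal a skip would end the whole test run; mapping the skip onto unittest's skipTest keeps the runner alive. LocalSkipExample and test_requires_missing_hardware are hypothetical names, not part of the PyTorch test suite.

# Self-contained illustration of the _handle_test_skip -> skipTest mapping.
import unittest


class LocalSkipExample(unittest.TestCase):
    def _handle_test_skip(self, msg: str) -> None:
        # With all simulated ranks in one process, raising unittest's SkipTest
        # keeps the interpreter (and the rest of the run) alive.
        self.skipTest(msg)

    def test_requires_missing_hardware(self):
        # Simulate a decorator finding fewer GPUs than required and routing
        # the skip through the hook instead of calling sys.exit.
        self._handle_test_skip("need at least 2 GPUs")


if __name__ == "__main__":
    unittest.main()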