Mirror of https://github.com/pytorch/pytorch.git, synced 2025-10-20 21:14:14 +08:00
Revert "Enable more DTensor tests in local tensor mode and fix more integration issues (#165716)"
This reverts commit 1b397420f22b22f90a1093233ecd9167656e50cb. Reverted https://github.com/pytorch/pytorch/pull/165716 on behalf of https://github.com/pytorch-auto-revert due to Reverted automatically by pytorch's autorevert, to avoid this behaviour add the tag autorevert: disable ([comment](https://github.com/pytorch/pytorch/pull/165716#issuecomment-3418083391))
@@ -211,14 +211,6 @@ def at_least_x_gpu(x):
     return False


-def _maybe_handle_skip_if_lt_x_gpu(args, msg) -> bool:
-    _handle_test_skip = getattr(args[0], "_handle_test_skip", None)
-    if len(args) == 0 or _handle_test_skip is None:
-        return False
-    _handle_test_skip(msg)
-    return True
-
-
 def skip_if_lt_x_gpu(x):
     def decorator(func):
         @wraps(func)
@@ -229,9 +221,7 @@ def skip_if_lt_x_gpu(x):
                 return func(*args, **kwargs)
             if TEST_XPU and torch.xpu.device_count() >= x:
                 return func(*args, **kwargs)
-            test_skip = TEST_SKIPS[f"multi-gpu-{x}"]
-            if _maybe_handle_skip_if_lt_x_gpu(args, test_skip.message):
-                sys.exit(test_skip.exit_code)
+            sys.exit(TEST_SKIPS[f"multi-gpu-{x}"].exit_code)

         return wrapper

@@ -247,9 +237,7 @@ def nccl_skip_if_lt_x_gpu(backend, x):
                 return func(*args, **kwargs)
             if torch.cuda.is_available() and torch.cuda.device_count() >= x:
                 return func(*args, **kwargs)
-            test_skip = TEST_SKIPS[f"multi-gpu-{x}"]
-            if _maybe_handle_skip_if_lt_x_gpu(args, test_skip.message):
-                sys.exit(test_skip.exit_code)
+            sys.exit(TEST_SKIPS[f"multi-gpu-{x}"].exit_code)

         return wrapper
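Net effect of the revert: skip_if_lt_x_gpu and nccl_skip_if_lt_x_gpu go back to unconditionally calling sys.exit with the skip's exit code, rather than first offering the test instance a chance to observe the skip through an optional _handle_test_skip hook. Below is a minimal, self-contained sketch of the reverted hook pattern. FakeSkip, FakeTest, and the available_gpus parameter are hypothetical stand-ins so the sketch runs without torch or a GPU; also note the helper here checks len(args) before touching args[0], whereas the reverted code read args[0] first and would raise IndexError on empty args.

import sys
from dataclasses import dataclass
from functools import wraps


@dataclass
class FakeSkip:
    message: str
    exit_code: int


# Hypothetical stand-in for the real TEST_SKIPS registry.
TEST_SKIPS = {"multi-gpu-2": FakeSkip("Need at least 2 GPUs", 75)}


def _maybe_handle_skip_if_lt_x_gpu(args, msg) -> bool:
    # Reverted behavior: if the test instance (args[0]) defines a
    # _handle_test_skip hook, notify it of the skip and report success.
    # (len(args) is checked first here, unlike the reverted code.)
    if len(args) == 0:
        return False
    handler = getattr(args[0], "_handle_test_skip", None)
    if handler is None:
        return False
    handler(msg)
    return True


def skip_if_lt_x_gpu(x, available_gpus=0):
    # Simplified decorator: available_gpus stands in for the real
    # torch.cuda.device_count() / torch.xpu.device_count() probes.
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            if available_gpus >= x:
                return func(*args, **kwargs)
            test_skip = TEST_SKIPS[f"multi-gpu-{x}"]
            # In the reverted code both branches exit with the same code;
            # the hook only decides whether the test object hears about
            # the skip before the process exits.
            _maybe_handle_skip_if_lt_x_gpu(args, test_skip.message)
            sys.exit(test_skip.exit_code)

        return wrapper

    return decorator


class FakeTest:
    def _handle_test_skip(self, msg):
        print(f"recorded skip: {msg}")


if __name__ == "__main__":
    @skip_if_lt_x_gpu(2)
    def test_needs_two_gpus(self):
        print("running test body")

    try:
        test_needs_two_gpus(FakeTest())
    except SystemExit as e:
        print(f"exited with skip code {e.code}")

Exiting with the registered code still communicates the skip in the multiprocess harness, which reads a child's exit code back against TEST_SKIPS; the hook only mattered for runners, such as the local tensor mode tests this PR targeted, that handle skips in-process.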