Mirror of https://github.com/pytorch/pytorch.git, synced 2025-10-20 21:14:14 +08:00
For https://github.com/pytorch/pytorch/issues/114850, we port the distributed tests to Intel GPU. We enable Intel GPU with the following methods while keeping the original code style as much as possible:

- Use instantiate_device_type_tests() to generate device-specific test variants
- Use torch.accelerator.current_accelerator() to determine the accelerator backend
- Enable XPU for some test paths
- Add allow_xpu=True for supported test classes

Pull Request resolved: https://github.com/pytorch/pytorch/pull/160158
Approved by: https://github.com/guangyey, https://github.com/d4l3k
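As an illustration of the backend-detection bullet above, here is a minimal sketch, assuming PyTorch 2.6+ where the torch.accelerator API is available; the get_backend_device helper is hypothetical and not part of this PR:

import torch

def get_backend_device() -> str:
    # current_accelerator() returns a torch.device for the detected backend
    # (e.g. device(type="xpu") on Intel GPU), or None when no accelerator
    # is present.
    acc = torch.accelerator.current_accelerator()
    return acc.type if acc is not None else "cpu"

print(get_backend_device())  # e.g. "cuda" or "xpu"; "cpu" if none found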
70 lines | 2.1 KiB | Python
# Owner(s): ["oncall: distributed"]

import sys

import torch
from torch import distributed as dist
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.testing._internal.common_device_type import instantiate_device_type_tests
from torch.testing._internal.common_distributed import skip_if_lt_x_gpu
from torch.testing._internal.common_fsdp import (
    DEVICEInitMode,
    FSDPInitMode,
    FSDPTest,
    NestedWrappedModule,
)
from torch.testing._internal.common_utils import run_tests, TEST_WITH_DEV_DBG_ASAN

if not dist.is_available():
    print("Distributed not available, skipping tests", file=sys.stderr)
    sys.exit(0)

if TEST_WITH_DEV_DBG_ASAN:
    print(
        "Skip dev-asan as torch + multiprocessing spawn have known issues",
        file=sys.stderr,
    )
    sys.exit(0)

class TestTraversal(FSDPTest):
    @property
    def world_size(self):
        # Use every available accelerator, capped at two ranks.
        if torch.accelerator.is_available():
            gpu_cnt = torch.accelerator.device_count()
            if gpu_cnt < 2:
                return gpu_cnt
        return 2

    @skip_if_lt_x_gpu(2)
    def test_fsdp_modules(self):
        nested_wrapped_module = NestedWrappedModule.init(
            self.process_group,
            FSDPInitMode.RECURSIVE,
            DEVICEInitMode.DEVICE_BEFORE,
        )
        # fsdp_modules() returns every nested FSDP instance under the module.
        modules = FSDP.fsdp_modules(nested_wrapped_module)
        self.assertEqual(
            modules,
            [
                nested_wrapped_module.module.get_submodule("1"),
                nested_wrapped_module.module.get_submodule("1").get_submodule("0"),
                nested_wrapped_module.module.get_submodule("2"),
            ],
        )
        # With root_only=True, only the outermost FSDP instances are returned.
        modules = FSDP.fsdp_modules(nested_wrapped_module, root_only=True)
        self.assertEqual(
            modules,
            [
                nested_wrapped_module.module.get_submodule("1"),
                nested_wrapped_module.module.get_submodule("2"),
            ],
        )

# Instantiate device-specific variants of TestTraversal for each backend.
devices = ("cuda", "hpu", "xpu")
instantiate_device_type_tests(
    TestTraversal, globals(), only_for=devices, allow_xpu=True
)

if __name__ == "__main__":
    run_tests()