Mirror of https://github.com/pytorch/pytorch.git (synced 2025-10-20 21:14:14 +08:00)
Always build USE_DISTRIBUTED. (#160449)
Signed-off-by: Edward Yang <ezyang@meta.com>
Pull Request resolved: https://github.com/pytorch/pytorch/pull/160449
Approved by: https://github.com/wconstab, https://github.com/albanD, https://github.com/dcci
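With this change the distributed package is compiled unconditionally, so availability becomes a runtime question rather than a build-time one. Below is a minimal sketch of that runtime guard pattern, using the public torch.distributed.is_available() and is_initialized() APIs; the snippet is illustrative and not part of this commit, and the gloo backend, store address, and port are assumptions.

import torch
import torch.distributed as dist

def maybe_init_single_rank_group() -> bool:
    # is_available() reports whether the distributed package was compiled in;
    # with this change it is expected to be True on regular builds.
    if not dist.is_available():
        return False
    # is_initialized() guards against initializing the process group twice.
    if not dist.is_initialized():
        # Single-rank gloo group; host and port are arbitrary illustrative values.
        store = dist.TCPStore("127.0.0.1", 29500, 1, is_master=True)
        dist.init_process_group("gloo", store=store, rank=0, world_size=1)
    return True

if maybe_init_single_rank_group():
    t = torch.ones(4)
    dist.all_reduce(t)  # reduction over a world of size 1 leaves t unchanged
    dist.destroy_process_group()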
Committed by: PyTorch MergeBot
Parent: fbcabb4fbd
Commit: d80297a684
@@ -1,4 +1,4 @@
-if(USE_DISTRIBUTED AND NOT WIN32)
+if(NOT WIN32)
   set(DIST_AUTOGRAD_TEST_DIR "${TORCH_ROOT}/test/cpp/dist_autograd")
   set(DIST_AUTOGRAD_TEST_SOURCES
     ${TORCH_ROOT}/test/cpp/common/main.cpp
@@ -65,10 +65,7 @@ from torch.export.passes import move_to_device_pass
 from torch.fx.experimental.proxy_tensor import make_fx
 from torch.fx.experimental.symbolic_shapes import ShapeEnv
 from torch.testing import FileCheck
-from torch.testing._internal.common_cuda import (
-    PLATFORM_SUPPORTS_FLASH_ATTENTION,
-    xfailIfDistributedNotSupported,
-)
+from torch.testing._internal.common_cuda import PLATFORM_SUPPORTS_FLASH_ATTENTION
 from torch.testing._internal.common_utils import (
     find_library_location,
     IS_FBCODE,
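The xfailIfDistributedNotSupported import disappears because the decorator is no longer needed once distributed support is always compiled in. As a rough illustration of the pattern such a decorator implements (a hypothetical equivalent built on unittest, not PyTorch's actual helper):

import unittest

import torch.distributed as dist

def xfail_if_distributed_not_supported(fn):
    # Hypothetical stand-in for the removed decorator: mark the test as an
    # expected failure when the distributed package is not compiled in,
    # otherwise run it normally.
    if dist.is_available():
        return fn
    return unittest.expectedFailure(fn)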
@@ -15555,7 +15552,6 @@ class GraphModule(torch.nn.Module):
         finally:
             torch.distributed.destroy_process_group()

-    @xfailIfDistributedNotSupported
     def test_distributed_all_reduce(self):
         class Foo(torch.nn.Module):
             def __init__(self):
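These distributed export tests now run on every regular build rather than being marked as expected failures. A minimal, hedged sketch of the pattern test_distributed_all_reduce exercises: exporting a module that issues a collective inside a single-rank gloo group. AllReduceModule, the store address and port, and the assertion are illustrative assumptions, not the test's actual code.

import torch
import torch.distributed as dist
from torch.export import export

class AllReduceModule(torch.nn.Module):
    def forward(self, x):
        y = x + 1
        dist.all_reduce(y)  # world_size == 1, so the reduction is an identity
        return y

# Single-rank gloo group; host and port are arbitrary illustrative values.
store = dist.TCPStore("127.0.0.1", 29501, 1, is_master=True)
dist.init_process_group("gloo", store=store, rank=0, world_size=1)
try:
    m = AllReduceModule()
    inp = (torch.randn(4, 4),)
    ep = export(m, inp)
    assert torch.allclose(ep.module()(*inp), m(*inp))
finally:
    dist.destroy_process_group()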
@@ -15573,7 +15569,6 @@ class GraphModule(torch.nn.Module):
         inp = (torch.randn(4, 4),)
         self.assertTrue(torch.allclose(ep.module()(*inp), m(*inp)))

-    @xfailIfDistributedNotSupported
     def test_distributed_all_gather(self):
         class Foo(torch.nn.Module):
             def forward(self, x):
@@ -15589,7 +15584,6 @@ class GraphModule(torch.nn.Module):
                 torch.allclose(a, b) for a, b in zip(ep.module()(*inp), m(*inp))
             )

-    @xfailIfDistributedNotSupported
     def test_distributed_all_gather_into_tensor(self):
         class Foo(torch.nn.Module):
             def forward(self, x):
@@ -15603,7 +15597,6 @@ class GraphModule(torch.nn.Module):
         inp = (torch.randn(2),)
         self.assertTrue(torch.allclose(ep.module()(*inp), m(*inp)))

-    @xfailIfDistributedNotSupported
     @testing.expectedFailureCppRuntime
     def test_distributed_all_to_all_single(self):
         class Foo(torch.nn.Module):
@@ -15621,7 +15614,6 @@ class GraphModule(torch.nn.Module):
         )
         self.assertEqual(len(nodes), 1)

-    @xfailIfDistributedNotSupported
     @testing.expectedFailureCppRuntime
     def test_distributed_reduce_scatter_tensor(self):
         class Foo(torch.nn.Module):