Always build USE_DISTRIBUTED. (#160449)

Signed-off-by: Edward Yang <ezyang@meta.com>

Pull Request resolved: https://github.com/pytorch/pytorch/pull/160449
Approved by: https://github.com/wconstab, https://github.com/albanD, https://github.com/dcci
Author: Edward Yang, 2025-09-08 09:36:40 -04:00
Committed by: PyTorch MergeBot
Commit: d80297a684 (parent: fbcabb4fbd)
31 changed files with 125 additions and 215 deletions


@@ -1,4 +1,4 @@
-if(USE_DISTRIBUTED AND NOT WIN32)
+if(NOT WIN32)
   set(DIST_AUTOGRAD_TEST_DIR "${TORCH_ROOT}/test/cpp/dist_autograd")
   set(DIST_AUTOGRAD_TEST_SOURCES
     ${TORCH_ROOT}/test/cpp/common/main.cpp
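
With the guard change above, the dist_autograd C++ tests build whenever the platform is not Windows; distributed support itself is now always compiled. At the Python level, availability can still be probed at runtime. A minimal sketch using the public torch.distributed API:

import torch
import torch.distributed as dist

# is_available() reports whether this build of PyTorch was compiled
# with distributed support at all (independent of any backend).
if dist.is_available():
    print("distributed support compiled in")
else:
    print("distributed support not compiled in")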


@@ -65,10 +65,7 @@ from torch.export.passes import move_to_device_pass
 from torch.fx.experimental.proxy_tensor import make_fx
 from torch.fx.experimental.symbolic_shapes import ShapeEnv
 from torch.testing import FileCheck
-from torch.testing._internal.common_cuda import (
-    PLATFORM_SUPPORTS_FLASH_ATTENTION,
-    xfailIfDistributedNotSupported,
-)
+from torch.testing._internal.common_cuda import PLATFORM_SUPPORTS_FLASH_ATTENTION
 from torch.testing._internal.common_utils import (
     find_library_location,
     IS_FBCODE,
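
The removed helper marked tests as expected failures on builds without distributed support; with USE_DISTRIBUTED always built, that guard is moot, so the import and every use of the decorator below can go. A hypothetical re-creation of such a decorator (an assumption for illustration, not the actual torch.testing._internal implementation):

import unittest
import torch

def xfailIfDistributedNotSupported(fn):
    # Hypothetical sketch: treat the test as an expected failure when
    # the build lacks distributed support; otherwise run it normally.
    if not torch.distributed.is_available():
        return unittest.expectedFailure(fn)
    return fn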
@@ -15555,7 +15552,6 @@ class GraphModule(torch.nn.Module):
         finally:
             torch.distributed.destroy_process_group()
 
-    @xfailIfDistributedNotSupported
     def test_distributed_all_reduce(self):
         class Foo(torch.nn.Module):
             def __init__(self):
@@ -15573,7 +15569,6 @@ class GraphModule(torch.nn.Module):
         inp = (torch.randn(4, 4),)
         self.assertTrue(torch.allclose(ep.module()(*inp), m(*inp)))
 
-    @xfailIfDistributedNotSupported
     def test_distributed_all_gather(self):
         class Foo(torch.nn.Module):
             def forward(self, x):
@@ -15589,7 +15584,6 @@ class GraphModule(torch.nn.Module):
             torch.allclose(a, b) for a, b in zip(ep.module()(*inp), m(*inp))
         )
 
-    @xfailIfDistributedNotSupported
     def test_distributed_all_gather_into_tensor(self):
         class Foo(torch.nn.Module):
             def forward(self, x):
@@ -15603,7 +15597,6 @@ class GraphModule(torch.nn.Module):
         inp = (torch.randn(2),)
         self.assertTrue(torch.allclose(ep.module()(*inp), m(*inp)))
 
-    @xfailIfDistributedNotSupported
     @testing.expectedFailureCppRuntime
     def test_distributed_all_to_all_single(self):
         class Foo(torch.nn.Module):
@@ -15621,7 +15614,6 @@ class GraphModule(torch.nn.Module):
         )
         self.assertEqual(len(nodes), 1)
 
-    @xfailIfDistributedNotSupported
     @testing.expectedFailureCppRuntime
     def test_distributed_reduce_scatter_tensor(self):
         class Foo(torch.nn.Module):
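
The tests touched above export modules that call collectives such as all_reduce, all_gather, and reduce_scatter. A minimal single-process sketch of the pattern they exercise, assuming a build with the gloo backend (the MASTER_ADDR/MASTER_PORT values are arbitrary placeholders):

import os
import torch
import torch.distributed as dist

os.environ.setdefault("MASTER_ADDR", "localhost")
os.environ.setdefault("MASTER_PORT", "29500")
# World size 1 keeps the example runnable on a single machine.
dist.init_process_group("gloo", rank=0, world_size=1)
try:
    t = torch.randn(4, 4)
    dist.all_reduce(t)  # in-place sum across the (one-member) group
finally:
    dist.destroy_process_group()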