Mirror of https://github.com/pytorch/pytorch.git (synced 2025-10-20 21:14:14 +08:00)
[CI] Build MacOS M1 binaries without distributed support (#86451)
Partial fix for #86448, which causes the broken code to be exercised in CI. If this demonstrates the break, I'm not sure whether there should be a fix-forward of https://github.com/pytorch/pytorch/pull/85781 or a revert.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/86451
Approved by: https://github.com/malfet
@@ -1,14 +1,15 @@
 # Owner(s): ["oncall: distributed"]
 
 from typing import List, Optional, Tuple
 
+import unittest
+
 import torch
 import torch.distributed
 import torch.nn as nn
 import torch.nn.functional as F
 from torch import Tensor
 from torch.optim import SGD, Adam, AdamW
 from torch.testing._internal.common_utils import TestCase, run_tests
-from torch.distributed.optim.utils import functional_optim_map, register_functional_optim
 
 
 class MyModule(torch.nn.Module):
     def __init__(self):
@@ -64,6 +65,10 @@ class MyDummyFnOptimizer(object):
         with torch.no_grad():
             raise RuntimeError("MyDummyFnOptimizer does not support step() as of now")
 
+if torch.distributed.is_available():
+    from torch.distributed.optim.utils import functional_optim_map, register_functional_optim
+
+@unittest.skipIf(not torch.distributed.is_available(), "These are testing distributed functions")
 class TestFunctionalOptimParity(TestCase):
     def _validate_parameters(self, params_1, params_2):
         for p1, p2 in zip(params_1, params_2):
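The pattern this diff applies generalizes to any test module that touches torch.distributed: defer distributed-only imports behind torch.distributed.is_available(), and mark the test class with unittest.skipIf. Below is a minimal self-contained sketch of that pattern, assuming a build where distributed support may be compiled out; ExampleDistributedTest and its assertion are hypothetical illustrations, not code from the PR.

import unittest

import torch

# Distributed-only imports are deferred behind the availability check so
# that merely importing this module does not raise on builds without
# distributed support (e.g. the MacOS M1 binaries this CI change produces).
if torch.distributed.is_available():
    from torch.distributed.optim.utils import functional_optim_map


@unittest.skipIf(
    not torch.distributed.is_available(),
    "These are testing distributed functions",
)
class ExampleDistributedTest(unittest.TestCase):
    # Hypothetical test, for illustration only.
    def test_optim_map_is_populated(self):
        self.assertTrue(len(functional_optim_map) > 0)


if __name__ == "__main__":
    unittest.main()

Note that the import-time guard matters as much as the skip decorator: unittest.skipIf only prevents the tests from running, while an unguarded top-level import would already fail before unittest had a chance to skip anything.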