Mirror of https://github.com/pytorch/pytorch.git (synced 2025-10-20 21:14:14 +08:00)
[BE] Enable functional optim tests for windows (#63462)
Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/63462

Now that `torch.distributed.optim` gates DistributedOptimizer on RPC availability, these tests can be run on Windows.

ghstack-source-id: 136437635

Test Plan: CI

Reviewed By: SciPioneer

Differential Revision: D30358923

fbshipit-source-id: 36739bdfe7214789f17de652d30c62c2bc124c73
Committed by: Facebook GitHub Bot
Parent: 630ec2e190
Commit: 16a4434422
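The gating the summary refers to can be pictured with the short sketch below. This is a hedged illustration, not the actual torch/distributed/optim/__init__.py source: using torch.distributed.rpc.is_available() as the availability check and falling back to None are assumptions made for the example.

# Sketch only: one way to gate an RPC-dependent optimizer on RPC availability.
# Not the real torch.distributed.optim module; the is_available() check and the
# None fallback are assumptions for illustration.
import torch.distributed.rpc as rpc

# The functional optimizers carry no RPC dependency, so they stay importable
# everywhere, including on Windows where RPC is unsupported.
from torch.distributed.optim import functional_optim_map  # noqa: F401

if rpc.is_available():
    # DistributedOptimizer drives optimizers on remote RPC workers, so it is
    # only exposed when the RPC bindings exist in this build of torch.
    from torch.distributed.optim import DistributedOptimizer  # noqa: F401
else:
    DistributedOptimizer = None  # e.g. Windows builds without RPC

With the RPC-only pieces gated inside the package itself, the test files no longer need their own `if not IS_WINDOWS:` guards around the functional optimizer imports, which is what the hunks below remove.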
@@ -39,7 +39,6 @@ from torch.testing._internal.common_distributed import (
     with_nccl_blocking_wait,
 )
 from torch.testing._internal.common_utils import (
-    IS_WINDOWS,
     TestCase,
     run_tests,
     retry_on_connect_failures,
@@ -51,10 +50,9 @@ from torch.testing._internal.common_utils import (
 from torch.utils.checkpoint import checkpoint
 from torch.distributed.optim import functional_optim_map
 
-if not IS_WINDOWS:
-    from torch.distributed.optim.functional_sgd import _FunctionalSGD
-    from torch.distributed.optim.functional_adam import _FunctionalAdam
-    from torch.distributed.optim.functional_adamw import _FunctionalAdamW
+from torch.distributed.optim.functional_sgd import _FunctionalSGD
+from torch.distributed.optim.functional_adam import _FunctionalAdam
+from torch.distributed.optim.functional_adamw import _FunctionalAdamW
 
 if TEST_WITH_DEV_DBG_ASAN:
     print(
@@ -1,10 +1,8 @@
-import unittest
-
 import torch
 import torch.nn as nn
 import torch.nn.functional as F
 from torch.optim import SGD, Adam, AdamW
-from torch.testing._internal.common_utils import TestCase, run_tests, IS_WINDOWS
+from torch.testing._internal.common_utils import TestCase, run_tests
 from torch.distributed.optim import functional_optim_map
 
 class MyModule(torch.nn.Module):
@@ -80,24 +78,12 @@ class TestFunctionalOptimParity(TestCase):
             self.assertNotEqual(old_module_optim_params[i], optim_param)
             self.assertNotEqual(old_module_functional_params[i], functional_param)
 
-    @unittest.skipIf(
-        IS_WINDOWS,
-        "Functional optimizer not support on windows, see https://github.com/pytorch/pytorch/issues/62137",
-    )
     def test_functional_optim_parity_sgd(self):
         self._test_functional_optim_parity(SGD, 1e-2, momentum=0.9, weight_decay=0.01)
 
-    @unittest.skipIf(
-        IS_WINDOWS,
-        "Functional optimizer not support on windows, see https://github.com/pytorch/pytorch/issues/62137",
-    )
     def test_functional_optim_parity_adam(self):
         self._test_functional_optim_parity(Adam, 1e-2, betas=(0.9, 0.999), eps=1e-6)
 
-    @unittest.skipIf(
-        IS_WINDOWS,
-        "Functional optimizer not support on windows, see https://github.com/pytorch/pytorch/issues/62137",
-    )
     def test_functional_optim_parity_adam_w(self):
         self._test_functional_optim_parity(AdamW, 1e-2, betas=(0.9, 0.999), eps=1e-6)
 
@@ -68,11 +68,12 @@ from torch.testing._internal.common_utils import (
 
 from torch.distributed.optim import functional_optim_map
 
+from torch.distributed.optim.functional_sgd import _FunctionalSGD
+from torch.distributed.optim.functional_adam import _FunctionalAdam
+from torch.distributed.optim.functional_adamw import _FunctionalAdamW
+
 if not IS_WINDOWS:
     import torch.distributed.optim.post_localSGD_optimizer as post_localSGD_optimizer
-    from torch.distributed.optim.functional_sgd import _FunctionalSGD
-    from torch.distributed.optim.functional_adam import _FunctionalAdam
-    from torch.distributed.optim.functional_adamw import _FunctionalAdamW
 
 from torch.utils.data.distributed import DistributedSampler
 
@@ -4003,10 +4004,6 @@ class DistributedTest:
             BACKEND != "nccl" and BACKEND != "gloo",
             "Only Nccl & Gloo backend support DistributedDataParallel",
         )
-        @sandcastle_skip_if(
-            IS_WINDOWS,
-            "FunctionalAdam not yet supported with Windows, see https://github.com/pytorch/pytorch/issues/62137"
-        )
         @skip_if_lt_x_gpu(2)
         @skip_if_rocm
         def test_ddp_hook_with_optimizer_parity_adamw(self):
@@ -4029,10 +4026,6 @@ class DistributedTest:
             BACKEND != "nccl" and BACKEND != "gloo",
             "Only Nccl & Gloo backend support DistributedDataParallel",
         )
-        @sandcastle_skip_if(
-            IS_WINDOWS,
-            "FunctionalAdam not yet supported with Windows, see https://github.com/pytorch/pytorch/issues/62137"
-        )
         @skip_if_lt_x_gpu(2)
        @skip_if_rocm
         def test_ddp_hook_with_optimizer_parity_adam(self):
@@ -4055,10 +4048,6 @@ class DistributedTest:
             BACKEND != "nccl" and BACKEND != "gloo",
             "Only Nccl & Gloo backend support DistributedDataParallel",
         )
-        @sandcastle_skip_if(
-            IS_WINDOWS,
-            "FunctionalSGD not yet supported with Windows, see https://github.com/pytorch/pytorch/issues/62137"
-        )
         @skip_if_lt_x_gpu(2)
         @skip_if_rocm
         def test_ddp_hook_with_optimizer_parity_sgd(self):