Mirror of https://github.com/pytorch/pytorch.git (synced 2025-10-20 21:14:14 +08:00)
enable unit tests (#25963)
Summary: These unit tests pass after landing all the warp size awareness patches.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/25963
Differential Revision: D17319124
Pulled By: bddppq
fbshipit-source-id: 22f5d5f1ca9c67e66a7ccf983b2d2f889a74e729
Committed by: Facebook Github Bot
Parent: 075adb4d2d
Commit: 00d967c39d
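Every hunk below deletes a `@skipIfRocm` decorator, re-enabling the test on ROCm CI. As a rough sketch of what that decorator does (the real implementation lives in the test suite's common_utils; the `PYTORCH_TEST_WITH_ROCM` flag and the body here are assumptions for illustration):

```python
import functools
import os
import unittest

# Assumed flag: the test suite reads PYTORCH_TEST_WITH_ROCM from the
# environment; the exact mechanics in common_utils may differ.
TEST_WITH_ROCM = os.environ.get('PYTORCH_TEST_WITH_ROCM', '0') == '1'

def skipIfRocm(fn):
    # Skip the wrapped test when running on the ROCm stack.
    @functools.wraps(fn)
    def wrapper(*args, **kwargs):
        if TEST_WITH_ROCM:
            raise unittest.SkipTest("test doesn't currently work on ROCm")
        return fn(*args, **kwargs)
    return wrapper
```

With the decorator gone, the tests run on ROCm again; per the summary, they now pass because the kernels no longer hard-code CUDA's warp size of 32 (AMD wavefronts are 64 lanes wide).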
@@ -2660,7 +2660,6 @@ class TestCuda(TestCase):
         tensor = tensor.unsqueeze(1)
         self.assertEqual(tensor.var(0), 0.03125)
 
-    @skipIfRocm
     def test_digamma(self):
         def test(use_double=False):
             cpu_tensor = torch.randn(10, 10, 10)
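For context, roughly what the re-enabled test_digamma checks: digamma evaluated on GPU should match the CPU result. A minimal standalone sketch (the real test also covers double precision and known tricky inputs):

```python
import torch

# Sample away from digamma's poles at 0, -1, -2, ...
x = torch.randn(10, 10, 10).abs() + 0.5
cpu_out = torch.digamma(x)
if torch.cuda.is_available():
    gpu_out = torch.digamma(x.cuda())
    print((gpu_out.cpu() - cpu_out).abs().max())  # should be ~0
```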
@@ -2689,7 +2688,6 @@ class TestCuda(TestCase):
             norm_errors = (gpu_out - cpu_out.cuda()) / gpu_out
             self.assertEqual(norm_errors, expected_errors)
 
-    @skipIfRocm
     def test_polygamma(self):
         def test(use_double=False):
             cpu_tensor = torch.randn(10, 10, 10)
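test_polygamma extends the same CPU/GPU comparison to higher orders; torch.polygamma(n, x) is the n-th derivative of digamma, so order 0 reproduces digamma itself. A minimal sketch:

```python
import torch

x = torch.randn(10, 10, 10).abs() + 0.5
for n in (0, 1):  # order 0 equals torch.digamma
    cpu_out = torch.polygamma(n, x)
    if torch.cuda.is_available():
        gpu_out = torch.polygamma(n, x.cuda())
        print(n, (gpu_out.cpu() - cpu_out).abs().max())
```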
@@ -1875,7 +1875,6 @@ graph(%Ra, %Rb):
 
     @unittest.skipIf(not RUN_CUDA, "test_dropout_cuda require CUDA")
     @unittest.skipIf(IS_WINDOWS, "NYI: fuser support for Windows")
-    @skipIfRocm
     def test_dropout_cuda(self):
         # Dropout AD is dispatched to _fused_dropout in CUDA case,
         # which is not included in TestJitGeneratedFunctional
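The comment in the hunk notes that dropout's autograd on CUDA goes through a fused kernel, so it is not covered by the generated functional tests. A hedged sketch of the property being guarded, using only the public F.dropout API:

```python
import torch
import torch.nn.functional as F

if torch.cuda.is_available():
    x = torch.randn(32, 64, device='cuda', requires_grad=True)
    y = F.dropout(x, p=0.5, training=True)
    y.sum().backward()
    # Surviving elements are scaled by 1/(1-p) = 2.0; dropped ones get zero grad.
    assert set(x.grad.unique().tolist()) <= {0.0, 2.0}
```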
@@ -47,7 +47,6 @@ class TestFuser(JitTestCase):
         self._test_fused_abs(device="cuda")
 
     @unittest.skipIf(not RUN_CUDA, "requires CUDA")
-    @skipIfRocm
     def test_zero_element_tensors(self):
         def decode(sin_t, cos_t):
             theta = torch.atan2(sin_t.float(), cos_t.float())
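test_zero_element_tensors pushes empty tensors through the decode helper; the fused kernel must tolerate a zero-size launch. A minimal reproduction of the shape of that test (the fusion-specific assertions are omitted):

```python
import torch

def decode(sin_t, cos_t):
    # Mirrors the helper in the test: recover an angle from its sine/cosine.
    return torch.atan2(sin_t.float(), cos_t.float())

if torch.cuda.is_available():
    empty = torch.empty(0, device='cuda')
    print(decode(empty, empty).shape)  # torch.Size([0]) -- no kernel blow-up
```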
@@ -358,7 +357,6 @@ class TestFuser(JitTestCase):
     # If this is a real problem, we'll need to revisit Torchscript Function
     # lifetimes in Python.
     @unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
-    @skipIfRocm
     def test_lerp(self):
         start = torch.randn(4, 1, dtype=torch.float, device='cuda')
         end = torch.randn(1, 4, dtype=torch.float, device='cuda')
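test_lerp fuses a broadcasting lerp: a (4, 1) start against a (1, 4) end. Ignoring the fuser machinery, the numeric identity being exercised is:

```python
import torch

start = torch.randn(4, 1)
end = torch.randn(1, 4)
out = torch.lerp(start, end, 0.5)  # broadcasts (4,1) x (1,4) -> (4,4)
assert torch.allclose(out, start + 0.5 * (end - start))
```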
@@ -3336,7 +3336,6 @@ class TestNN(NNTestCase):
         self._test_InstanceNorm_general(nn.InstanceNorm3d, input, dtype=torch.float)
 
     @unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
-    @skipIfRocm
     def test_InstanceNorm3d_general_cuda(self):
         b = random.randint(3, 5)
         c = random.randint(2, 5)
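test_InstanceNorm3d_general_cuda drives _test_InstanceNorm_general with random sizes. The basic behavior under test, sketched without the helper (sizes fixed here for brevity):

```python
import torch
import torch.nn as nn

b, c = 4, 3                                # the test randomizes these sizes
m = nn.InstanceNorm3d(c)
x = torch.randn(b, c, 8, 8, 8)
y = m(x)
# Each (sample, channel) slice is normalized over its own D*H*W volume.
print(y.reshape(b, c, -1).mean(dim=2).abs().max())  # close to 0
```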
@@ -4007,7 +4006,6 @@ class TestNN(NNTestCase):
         self._test_batchnorm_grad()
 
     @unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
-    @skipIfRocm
     def test_batchnorm_grad_cuda(self):
         self._test_batchnorm_grad("cuda")
         if TEST_CUDNN:
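_test_batchnorm_grad's body is not shown in this diff; a plausible minimal analogue is a gradcheck of batch_norm in training mode (this sketch is an assumption about the helper, not its actual code):

```python
import torch
import torch.nn.functional as F
from torch.autograd import gradcheck

# gradcheck compares analytic gradients against finite differences,
# so double precision is required.
x = torch.randn(4, 3, 8, dtype=torch.double, requires_grad=True)
weight = torch.randn(3, dtype=torch.double, requires_grad=True)
bias = torch.randn(3, dtype=torch.double, requires_grad=True)
assert gradcheck(
    lambda x, w, b: F.batch_norm(x, None, None, w, b, training=True),
    (x, weight, bias))
```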
@@ -14,8 +14,7 @@ from torch import sparse
 from torch.optim.lr_scheduler import LambdaLR, StepLR, MultiStepLR, \
     ExponentialLR, CosineAnnealingLR, ReduceLROnPlateau, _LRScheduler, \
     CyclicLR, CosineAnnealingWarmRestarts, OneCycleLR
-from common_utils import TestCase, run_tests, TEST_WITH_UBSAN, load_tests, \
-    skipIfRocm
+from common_utils import TestCase, run_tests, TEST_WITH_UBSAN, load_tests
 
 # load_tests from common_utils is used to automatically filter tests for
 # sharding on sandcastle. This line silences flake warnings
@@ -285,7 +284,6 @@ class TestOptim(TestCase):
             [lambda opt: StepLR(opt, gamma=0.99999, step_size=300)]
         )
 
-    @skipIfRocm
     def test_adam(self):
         self._test_basic_cases(
             lambda weight, bias: optim.Adam([weight, bias], lr=1e-3)
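_test_basic_cases takes an optimizer constructor plus optional scheduler constructors, as in the StepLR lambda above. Unrolled by hand, the Adam case amounts to roughly the loop below (the toy objective is made up; the Adamax and ASGD hunks that follow have the same shape):

```python
import torch
import torch.optim as optim
from torch.optim.lr_scheduler import StepLR

weight = torch.randn(10, 5, requires_grad=True)
bias = torch.randn(10, requires_grad=True)
optimizer = optim.Adam([weight, bias], lr=1e-3)
scheduler = StepLR(optimizer, gamma=0.99999, step_size=300)

for _ in range(5):
    optimizer.zero_grad()
    loss = weight.pow(2).sum() + bias.pow(2).sum()  # toy objective
    loss.backward()
    optimizer.step()
    scheduler.step()
```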
@@ -401,7 +399,6 @@ class TestOptim(TestCase):
                 lambda opt: ReduceLROnPlateau(opt, threshold=1e-4)]
         )
 
-    @skipIfRocm
     def test_adamax(self):
         self._test_basic_cases(
             lambda weight, bias: optim.Adamax([weight, bias], lr=1e-1)
@@ -426,7 +423,6 @@ class TestOptim(TestCase):
         with self.assertRaisesRegex(ValueError, "Invalid momentum value: -1.0"):
             optim.RMSprop(None, lr=1e-2, momentum=-1.0)
 
-    @skipIfRocm
     def test_asgd(self):
         self._test_basic_cases(
             lambda weight, bias: optim.ASGD([weight, bias], lr=1e-3, t0=100)
@@ -451,7 +447,6 @@ class TestOptim(TestCase):
         with self.assertRaisesRegex(ValueError, "Invalid eta values: 1.0, 0.5"):
             optim.Rprop(None, lr=1e-2, etas=(1.0, 0.5))
 
-    @skipIfRocm
     def test_lbfgs(self):
         self._test_basic_cases(
             lambda weight, bias: optim.LBFGS([weight, bias]),
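LBFGS is the one optimizer here with a genuinely different calling convention: step() takes a closure, because the algorithm re-evaluates the loss during its line search. A minimal sketch (toy objective again assumed):

```python
import torch
import torch.optim as optim

weight = torch.randn(10, 5, requires_grad=True)
bias = torch.randn(10, requires_grad=True)
optimizer = optim.LBFGS([weight, bias])

def closure():
    # LBFGS may call this several times per step, so it recomputes the loss.
    optimizer.zero_grad()
    loss = weight.pow(2).sum() + bias.pow(2).sum()
    loss.backward()
    return loss

optimizer.step(closure)
```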