Mirror of https://github.com/pytorch/pytorch.git (synced 2025-10-20 21:14:14 +08:00)
Actually run Dynamo tests in all Dynamo shards (#115962)
We weren't doing this before. Also adds some more skips so that CI passes.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/115962
Approved by: https://github.com/voznesenskym
ghstack dependencies: #115925
@@ -1089,13 +1089,12 @@ elif [[ "${TEST_CONFIG}" == *inductor* && "${SHARD_NUMBER}" == 1 ]]; then
   test_inductor
   test_inductor_distributed
 elif [[ "${TEST_CONFIG}" == *dynamo* && "${SHARD_NUMBER}" == 1 && $NUM_TEST_SHARDS -gt 1 ]]; then
   test_without_numpy
   install_torchvision
   test_dynamo_shard 1
   test_aten
-elif [[ "${TEST_CONFIG}" == *dynamo* && "${SHARD_NUMBER}" == 2 && $NUM_TEST_SHARDS -gt 1 ]]; then
+elif [[ "${TEST_CONFIG}" == *dynamo* && $SHARD_NUMBER -gt 1 && $NUM_TEST_SHARDS -gt 1 ]]; then
   install_torchvision
-  test_dynamo_shard 2
+  test_dynamo_shard "${SHARD_NUMBER}"
 elif [[ "${SHARD_NUMBER}" == 1 && $NUM_TEST_SHARDS -gt 1 ]]; then
   test_without_numpy
   install_torchvision
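To make the shard routing above concrete, here is a small, purely illustrative Python sketch of the condition change (the function name and the assertions are hypothetical, not code from the repository): previously only shard 2 matched the Dynamo branch, so with three or more test shards the higher-numbered shards ran no Dynamo tests at all; the new condition sends every shard above 1 to its own test_dynamo_shard slice.

from typing import Optional


def dynamo_shard_to_run(shard_number: int, num_test_shards: int) -> Optional[int]:
    """Illustrative stand-in for the test.sh branch logic above (hypothetical helper)."""
    if num_test_shards <= 1:
        return None
    if shard_number == 1:
        # Shard 1 also runs test_without_numpy and test_aten in the script above.
        return 1
    # New behavior: every remaining shard runs its own slice of the Dynamo tests.
    # The old condition only matched shard_number == 2, so shards 3+ ran nothing.
    return shard_number


assert dynamo_shard_to_run(1, 3) == 1
assert dynamo_shard_to_run(2, 3) == 2
assert dynamo_shard_to_run(3, 3) == 3  # previously this shard ran no Dynamo tests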
@@ -4,7 +4,7 @@ import collections
 import unittest

 import torch
-from torch.testing._internal.common_utils import TestCase, run_tests, IS_WINDOWS
+from torch.testing._internal.common_utils import TestCase, run_tests, IS_WINDOWS, skipIfTorchDynamo
 from torch.testing._internal.autocast_test_lists import AutocastCPUTestLists
 from torch.utils._python_dispatch import TorchDispatchMode

@@ -101,22 +101,26 @@ class TestAutocastCPU(TestCase):
         else:
             return op_with_args[0], op_with_args[1], op_with_args[2]

+    @skipIfTorchDynamo
     def test_autocast_torch_expect_builtin_promote(self):
         for op, args1, args2, out_type in self.autocast_lists.torch_expect_builtin_promote:
             self._run_autocast_outofplace(op, args1, torch.float32, out_type=out_type)
             self._run_autocast_outofplace(op, args2, torch.float32, out_type=out_type, amp_dtype=torch.float16)

+    @skipIfTorchDynamo
     def test_autocast_methods_expect_builtin_promote(self):
         for op, args1, args2, out_type in self.autocast_lists.methods_expect_builtin_promote:
             self._run_autocast_outofplace(op, args1, torch.float32, module=None, out_type=out_type)
             self._run_autocast_outofplace(op, args2, torch.float32, module=None, out_type=out_type, amp_dtype=torch.float16)

+    @skipIfTorchDynamo
     def test_autocast_torch_16(self):
         for op_with_args in self.autocast_lists.torch_16:
             op, args, maybe_kwargs = self.args_maybe_kwargs(op_with_args)
             self._run_autocast_outofplace(op, args, torch.bfloat16, add_kwargs=maybe_kwargs)
             self._run_autocast_outofplace(op, args, torch.float16, add_kwargs=maybe_kwargs, amp_dtype=torch.float16)

+    @skipIfTorchDynamo
     def test_autocast_nn_16(self):
         for op_with_args in self.autocast_lists.nn_16:
             op, args, maybe_kwargs = self.args_maybe_kwargs(op_with_args)
@@ -132,12 +136,14 @@ class TestAutocastCPU(TestCase):
                 amp_dtype=torch.float16,
             )

+    @skipIfTorchDynamo
     def test_autocast_torch_fp32(self):
         for op_with_args in self.autocast_lists.torch_fp32:
             op, args, maybe_kwargs = self.args_maybe_kwargs(op_with_args)
             self._run_autocast_outofplace(op, args, torch.float32, add_kwargs=maybe_kwargs)
             self._run_autocast_outofplace(op, args, torch.float32, add_kwargs=maybe_kwargs, amp_dtype=torch.float16)

+    @skipIfTorchDynamo
     def test_autocast_nn_fp32(self):
         for op_with_args in self.autocast_lists.nn_fp32:
             op, args, maybe_kwargs = self.args_maybe_kwargs(op_with_args)
@@ -153,6 +159,7 @@ class TestAutocastCPU(TestCase):
                 amp_dtype=torch.float16,
             )

+    @skipIfTorchDynamo
     def test_autocast_torch_need_autocast_promote(self):
         for op, args1, args2 in self.autocast_lists.torch_need_autocast_promote:
             self._run_autocast_outofplace(op, args1, torch.float32)
@@ -10,7 +10,7 @@ from itertools import permutations, product
 from torch.testing import make_tensor
 from torch.testing._internal.common_dtype import all_types, all_types_and, floating_types_and, integral_types
 from torch.testing._internal.common_utils import \
-    (TestCase, run_tests, slowTest)
+    (TestCase, run_tests, slowTest, skipIfTorchDynamo)
 from torch.testing._internal.common_device_type import \
     (instantiate_device_type_tests, dtypes, onlyNativeDeviceTypes,
     onlyCUDA, dtypesIfCUDA, dtypesIfCPU, onlyCPU, largeTensorTest)
@@ -366,6 +366,7 @@ class TestSortAndSelect(TestCase):
         for shape in shapes:
             test(shape)

+    @skipIfTorchDynamo("Fails on python 3.11")
     @dtypes(torch.float)
     def test_sort_expanded_tensor(self, device, dtype):
         # https://github.com/pytorch/pytorch/issues/91420
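The hunk above also shows the decorator composing with the device-type test machinery and carrying an explicit skip reason. A minimal, self-contained sketch of that pattern, assuming only the standard PyTorch test utilities (the class name and the test body are invented for illustration and are not part of the real suite):

import torch
from torch.testing._internal.common_device_type import dtypes, instantiate_device_type_tests
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfTorchDynamo


class ExampleSortTest(TestCase):
    @skipIfTorchDynamo("Fails on python 3.11")  # reason string reported when the test is skipped
    @dtypes(torch.float)
    def test_sort_on_expanded_input(self, device, dtype):
        # expand() yields a non-contiguous view; sorting it should still work.
        data = torch.tensor([[4.0, 1.0, 3.0]], device=device, dtype=dtype).expand(2, 3)
        values, _ = torch.sort(data, dim=1)
        expected = torch.tensor([[1.0, 3.0, 4.0], [1.0, 3.0, 4.0]], device=device, dtype=dtype)
        self.assertEqual(values, expected)


# Generates ExampleSortTestCPU / ExampleSortTestCUDA variants for the available devices.
instantiate_device_type_tests(ExampleSortTest, globals())

if __name__ == "__main__":
    run_tests()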