From 992c4e7b249fe33b5d78c9bd9f7772c7129388b5 Mon Sep 17 00:00:00 2001
From: rzou
Date: Fri, 15 Dec 2023 16:57:28 -0800
Subject: [PATCH] Actually run Dynamo tests in all Dynamo shards (#115962)

We weren't doing this before. Also adds some more skips so that CI passes

Pull Request resolved: https://github.com/pytorch/pytorch/pull/115962
Approved by: https://github.com/voznesenskym
ghstack dependencies: #115925
---
 .ci/pytorch/test.sh          | 5 ++---
 test/test_autocast.py        | 9 ++++++++-
 test/test_sort_and_select.py | 3 ++-
 3 files changed, 12 insertions(+), 5 deletions(-)

diff --git a/.ci/pytorch/test.sh b/.ci/pytorch/test.sh
index 779c72e2996b..956215ec4d55 100755
--- a/.ci/pytorch/test.sh
+++ b/.ci/pytorch/test.sh
@@ -1089,13 +1089,12 @@ elif [[ "${TEST_CONFIG}" == *inductor* && "${SHARD_NUMBER}" == 1 ]]; then
   test_inductor
   test_inductor_distributed
 elif [[ "${TEST_CONFIG}" == *dynamo* && "${SHARD_NUMBER}" == 1 && $NUM_TEST_SHARDS -gt 1 ]]; then
-  test_without_numpy
   install_torchvision
   test_dynamo_shard 1
   test_aten
-elif [[ "${TEST_CONFIG}" == *dynamo* && "${SHARD_NUMBER}" == 2 && $NUM_TEST_SHARDS -gt 1 ]]; then
+elif [[ "${TEST_CONFIG}" == *dynamo* && $SHARD_NUMBER -gt 1 && $NUM_TEST_SHARDS -gt 1 ]]; then
   install_torchvision
-  test_dynamo_shard 2
+  test_dynamo_shard "${SHARD_NUMBER}"
 elif [[ "${SHARD_NUMBER}" == 1 && $NUM_TEST_SHARDS -gt 1 ]]; then
   test_without_numpy
   install_torchvision
diff --git a/test/test_autocast.py b/test/test_autocast.py
index fd8f57c5276d..f7f8e8ccdfe6 100644
--- a/test/test_autocast.py
+++ b/test/test_autocast.py
@@ -4,7 +4,7 @@ import collections
 import unittest
 
 import torch
-from torch.testing._internal.common_utils import TestCase, run_tests, IS_WINDOWS
+from torch.testing._internal.common_utils import TestCase, run_tests, IS_WINDOWS, skipIfTorchDynamo
 from torch.testing._internal.autocast_test_lists import AutocastCPUTestLists
 from torch.utils._python_dispatch import TorchDispatchMode
 
@@ -101,22 +101,26 @@ class TestAutocastCPU(TestCase):
         else:
             return op_with_args[0], op_with_args[1], op_with_args[2]
 
+    @skipIfTorchDynamo
     def test_autocast_torch_expect_builtin_promote(self):
         for op, args1, args2, out_type in self.autocast_lists.torch_expect_builtin_promote:
             self._run_autocast_outofplace(op, args1, torch.float32, out_type=out_type)
             self._run_autocast_outofplace(op, args2, torch.float32, out_type=out_type, amp_dtype=torch.float16)
 
+    @skipIfTorchDynamo
     def test_autocast_methods_expect_builtin_promote(self):
         for op, args1, args2, out_type in self.autocast_lists.methods_expect_builtin_promote:
             self._run_autocast_outofplace(op, args1, torch.float32, module=None, out_type=out_type)
             self._run_autocast_outofplace(op, args2, torch.float32, module=None, out_type=out_type, amp_dtype=torch.float16)
 
+    @skipIfTorchDynamo
     def test_autocast_torch_16(self):
         for op_with_args in self.autocast_lists.torch_16:
             op, args, maybe_kwargs = self.args_maybe_kwargs(op_with_args)
             self._run_autocast_outofplace(op, args, torch.bfloat16, add_kwargs=maybe_kwargs)
             self._run_autocast_outofplace(op, args, torch.float16, add_kwargs=maybe_kwargs, amp_dtype=torch.float16)
 
+    @skipIfTorchDynamo
     def test_autocast_nn_16(self):
         for op_with_args in self.autocast_lists.nn_16:
             op, args, maybe_kwargs = self.args_maybe_kwargs(op_with_args)
@@ -132,12 +136,14 @@ class TestAutocastCPU(TestCase):
                 amp_dtype=torch.float16,
             )
 
+    @skipIfTorchDynamo
     def test_autocast_torch_fp32(self):
         for op_with_args in self.autocast_lists.torch_fp32:
             op, args, maybe_kwargs = self.args_maybe_kwargs(op_with_args)
             self._run_autocast_outofplace(op, args, torch.float32, add_kwargs=maybe_kwargs)
             self._run_autocast_outofplace(op, args, torch.float32, add_kwargs=maybe_kwargs, amp_dtype=torch.float16)
 
+    @skipIfTorchDynamo
     def test_autocast_nn_fp32(self):
         for op_with_args in self.autocast_lists.nn_fp32:
             op, args, maybe_kwargs = self.args_maybe_kwargs(op_with_args)
@@ -153,6 +159,7 @@ class TestAutocastCPU(TestCase):
                 amp_dtype=torch.float16,
             )
 
+    @skipIfTorchDynamo
     def test_autocast_torch_need_autocast_promote(self):
         for op, args1, args2 in self.autocast_lists.torch_need_autocast_promote:
             self._run_autocast_outofplace(op, args1, torch.float32)
diff --git a/test/test_sort_and_select.py b/test/test_sort_and_select.py
index d3b04617d2c1..4b5033549f80 100644
--- a/test/test_sort_and_select.py
+++ b/test/test_sort_and_select.py
@@ -10,7 +10,7 @@ from itertools import permutations, product
 from torch.testing import make_tensor
 from torch.testing._internal.common_dtype import all_types, all_types_and, floating_types_and, integral_types
 from torch.testing._internal.common_utils import \
-    (TestCase, run_tests, slowTest)
+    (TestCase, run_tests, slowTest, skipIfTorchDynamo)
 from torch.testing._internal.common_device_type import \
     (instantiate_device_type_tests, dtypes, onlyNativeDeviceTypes,
      onlyCUDA, dtypesIfCUDA, dtypesIfCPU, onlyCPU, largeTensorTest)
@@ -366,6 +366,7 @@ class TestSortAndSelect(TestCase):
         for shape in shapes:
             test(shape)
 
+    @skipIfTorchDynamo("Fails on python 3.11")
     @dtypes(torch.float)
     def test_sort_expanded_tensor(self, device, dtype):
         # https://github.com/pytorch/pytorch/issues/91420