From cbde0f048b730e866c53154673d145ab28be46ad Mon Sep 17 00:00:00 2001
From: William Wen
Date: Thu, 4 Apr 2024 16:58:26 -0700
Subject: [PATCH] [dynamo, 3.12] enable tests disabled due to missing dynamo
 3.12 support (#123300)

Pull Request resolved: https://github.com/pytorch/pytorch/pull/123300
Approved by: https://github.com/jansel, https://github.com/malfet, https://github.com/zou3519
---
 test/custom_operator/test_custom_ops.py        |  3 ---
 test/dynamo/test_activation_checkpointing.py   | 25 -------------------
 test/functorch/test_vmap.py                    |  2 --
 test/fx/test_matcher_utils.py                  |  3 ---
 test/inductor/test_mmdecomp.py                 |  6 ++---
 test/inductor/test_perf.py                     |  6 ++---
 test/profiler/test_profiler.py                 |  1 -
 .../core/experimental/test_float8.py           |  4 ---
 test/quantization/core/test_quantized_op.py    |  4 ---
 test/quantization/pt2e/test_duplicate_dq.py    |  4 ---
 .../test_generate_numeric_debug_handle.py      |  4 ---
 test/quantization/pt2e/test_graph_utils.py     |  4 ---
 .../pt2e/test_metadata_porting.py              |  4 ---
 test/quantization/pt2e/test_quantize_pt2e.py   |  3 ---
 .../pt2e/test_quantize_pt2e_qat.py             | 10 --------
 test/quantization/pt2e/test_representation.py  |  5 ----
 .../pt2e/test_xnnpack_quantizer.py             |  8 ------
 test/test_binary_ufuncs.py                     |  5 ----
 test/test_custom_ops.py                        | 13 ----------
 test/test_decomp.py                            |  2 --
 test/test_fake_tensor.py                       |  4 ---
 test/test_linalg.py                            |  7 ------
 test/test_nestedtensor.py                      |  1 -
 test/test_sparse_semi_structured.py            |  3 ---
 test/test_transformers.py                      |  3 ---
 tools/dynamo/verify_dynamo.py                  |  4 +--
 26 files changed, 6 insertions(+), 132 deletions(-)

diff --git a/test/custom_operator/test_custom_ops.py b/test/custom_operator/test_custom_ops.py
index 2e5028e41cfa..9eadb95c842a 100644
--- a/test/custom_operator/test_custom_ops.py
+++ b/test/custom_operator/test_custom_ops.py
@@ -32,9 +32,6 @@ class TestCustomOperators(TestCase):
         torch.ops.custom.cos(x)
 
     @unittest.skipIf(IS_WINDOWS, "torch.compile not supported on windows")
-    @unittest.skipIf(
-        sys.version_info >= (3, 12), "torch.compile is not supported on python 3.12+"
-    )
     def test_dynamo_pystub_suggestion(self):
         x = torch.randn(3)
diff --git a/test/dynamo/test_activation_checkpointing.py b/test/dynamo/test_activation_checkpointing.py
index 5208421efbee..efb2c448d1df 100644
--- a/test/dynamo/test_activation_checkpointing.py
+++ b/test/dynamo/test_activation_checkpointing.py
@@ -1,7 +1,6 @@
 # Owner(s): ["module: dynamo"]
 import functools
 import math
-import sys
 import unittest  # noqa: F811
 
 from importlib import import_module
@@ -518,9 +517,6 @@ class ActivationCheckpointingViaTagsTests(torch._dynamo.test_case.TestCase):
 
     @requires_cuda
     @unittest.skipIf(IS_WINDOWS, "torch.compile doesn't work with windows")
-    @unittest.skipIf(
-        sys.version_info >= (3, 12), "torch.compile is not supported on python 3.12+"
-    )
     @torch._dynamo.config.patch(
         "_experimental_support_context_fn_in_torch_utils_checkpoint", True
     )
@@ -571,9 +567,6 @@ class ActivationCheckpointingViaTagsTests(torch._dynamo.test_case.TestCase):
 
     @requires_cuda
     @unittest.skipIf(IS_WINDOWS, "torch.compile doesn't work with windows")
-    @unittest.skipIf(
-        sys.version_info >= (3, 12), "torch.compile is not supported on python 3.12+"
-    )
     @torch._dynamo.config.patch(
         "_experimental_support_context_fn_in_torch_utils_checkpoint", True
     )
@@ -627,9 +620,6 @@ class ActivationCheckpointingViaTagsTests(torch._dynamo.test_case.TestCase):
 
     @requires_cuda
     @unittest.skipIf(IS_WINDOWS, "torch.compile doesn't work with windows")
-    @unittest.skipIf(
-        sys.version_info >= (3, 12), "torch.compile is not supported on python 3.12+"
-    )
     @torch._dynamo.config.patch(
         "_experimental_support_context_fn_in_torch_utils_checkpoint", True
     )
@@ -698,9 +688,6 @@ class ActivationCheckpointingViaTagsTests(torch._dynamo.test_case.TestCase):
 
     @requires_cuda
     @unittest.skipIf(IS_WINDOWS, "torch.compile doesn't work with windows")
-    @unittest.skipIf(
-        sys.version_info >= (3, 12), "torch.compile is not supported on python 3.12+"
-    )
     @torch._dynamo.config.patch(
         "_experimental_support_context_fn_in_torch_utils_checkpoint", True
     )
@@ -750,9 +737,6 @@ class ActivationCheckpointingViaTagsTests(torch._dynamo.test_case.TestCase):
 
     @requires_cuda
     @unittest.skipIf(IS_WINDOWS, "torch.compile doesn't work with windows")
-    @unittest.skipIf(
-        sys.version_info >= (3, 12), "torch.compile is not supported on python 3.12+"
-    )
     @torch._dynamo.config.patch(
         "_experimental_support_context_fn_in_torch_utils_checkpoint", True
     )
@@ -801,9 +785,6 @@ class ActivationCheckpointingViaTagsTests(torch._dynamo.test_case.TestCase):
     @requires_cuda
     @unittest.skipIf(IS_WINDOWS, "torch.compile doesn't work with windows")
-    @unittest.skipIf(
-        sys.version_info >= (3, 12), "torch.compile is not supported on python 3.12+"
-    )
     @unittest.skip(
         "In-place op support in selective checkpointing + torch.compile "
         "requires TorchDispatchMode + torch.compile work to complete"
     )
@@ -858,9 +839,6 @@ class ActivationCheckpointingViaTagsTests(torch._dynamo.test_case.TestCase):
 
     @requires_cuda
     @unittest.skipIf(IS_WINDOWS, "torch.compile doesn't work with windows")
-    @unittest.skipIf(
-        sys.version_info >= (3, 12), "torch.compile is not supported on python 3.12+"
-    )
     @torch._dynamo.config.patch(
         "_experimental_support_context_fn_in_torch_utils_checkpoint", True
     )
@@ -922,9 +900,6 @@ class ActivationCheckpointingViaTagsTests(torch._dynamo.test_case.TestCase):
         self._compare_orig_and_checkpointed_fns(gn, fn, x)
 
     @unittest.skipIf(IS_WINDOWS, "torch.compile doesn't work with windows")
-    @unittest.skipIf(
-        sys.version_info >= (3, 12), "torch.compile is not supported on python 3.12+"
-    )
     @torch._dynamo.config.patch(
         "_experimental_support_context_fn_in_torch_utils_checkpoint", True
     )
diff --git a/test/functorch/test_vmap.py b/test/functorch/test_vmap.py
index 02bee7dd0a83..81d3362ac82d 100644
--- a/test/functorch/test_vmap.py
+++ b/test/functorch/test_vmap.py
@@ -53,7 +53,6 @@ from common_utils import (
 )
 import types
 import os
-import sys
 from collections import namedtuple
 import contextlib
@@ -2156,7 +2155,6 @@ class TestVmapOperators(Namespace.TestVmapBase):
 
     @unittest.skipIf(IS_WINDOWS, reason="Windows not yet supported for torch.compile")
-    @unittest.skipIf(sys.version_info >= (3, 12), "torch.compile is not supported on python 3.12+")
     def test_is_contiguous(self):
         def foo(x):
             if x.is_contiguous():
diff --git a/test/fx/test_matcher_utils.py b/test/fx/test_matcher_utils.py
index d0c3458bf61a..f16b7ef5e89a 100644
--- a/test/fx/test_matcher_utils.py
+++ b/test/fx/test_matcher_utils.py
@@ -142,7 +142,6 @@ class TestMatcher(JitTestCase):
         self.assertEqual(len(match_sp_result), 1)
 
     @unittest.skipIf(IS_WINDOWS, "Windows not yet supported for torch.compile")
-    @unittest.skipIf(sys.version_info >= (3, 12), "torch.compile is not supported on python 3.12+")
     def test_split_to_graph_and_name_node_map(self):
         """Testing the internal helper function for splitting the pattern graph"""
         from torch.fx.passes.utils.matcher_with_name_node_map_utils import _split_to_graph_and_name_node_map
@@ -166,7+165,6 @@ class TestMatcher(JitTestCase):
         self.assertEqual(before_split_res[1], after_split_res[1])
 
     @unittest.skipIf(IS_WINDOWS, "Windows not yet supported for torch.compile")
-    @unittest.skipIf(sys.version_info >= (3, 12), "torch.compile is not supported on python 3.12+")
     def test_matcher_with_name_node_map_function(self):
         """Testing SubgraphMatcherWithNameNodeMap with function pattern
         """
@@ -205,7 +203,6 @@ class TestMatcher(JitTestCase):
             assert "custom_annotation" in n.meta and n.meta["custom_annotation"] == "annotation"
 
     @unittest.skipIf(IS_WINDOWS, "Windows not yet supported for torch.compile")
-    @unittest.skipIf(sys.version_info >= (3, 12), "torch.compile is not supported on python 3.12+")
     def test_matcher_with_name_node_map_module(self):
         """Testing SubgraphMatcherWithNameNodeMap with module pattern
         """
diff --git a/test/inductor/test_mmdecomp.py b/test/inductor/test_mmdecomp.py
index 81152a6d1d51..9319fed2b794 100644
--- a/test/inductor/test_mmdecomp.py
+++ b/test/inductor/test_mmdecomp.py
@@ -1,7 +1,6 @@
 # Owner(s): ["module: nn"]
 
 import math
-import sys
 import unittest
 from typing import List, Tuple, Union
@@ -198,7 +197,6 @@ device_types = ("cpu", "cuda")
 instantiate_device_type_tests(TestDecomp, globals(), only_for=device_types)
 
 if __name__ == "__main__":
-    # We don't support torch.compile() on
-    # Windows and Python 3.12+
-    if not IS_WINDOWS and sys.version_info < (3, 12):
+    # We don't support torch.compile() on Windows
+    if not IS_WINDOWS:
         run_tests()
diff --git a/test/inductor/test_perf.py b/test/inductor/test_perf.py
index 22ccb63be7eb..c82febb1ce0c 100644
--- a/test/inductor/test_perf.py
+++ b/test/inductor/test_perf.py
@@ -1,6 +1,5 @@
 # Owner(s): ["module: inductor"]
 import contextlib
-import sys
 from unittest.mock import patch
 
 import functorch
@@ -41,9 +40,8 @@ def count_bytes_inductor(gm, example_inputs):
     return compile_fx(gm, example_inputs, inner_compile=count_bytes_inner)
 
 
-# We don't support torch.compile() on
-# Windows and Python 3.12+
-if not IS_WINDOWS and sys.version_info < (3, 12):
+# We don't support torch.compile() on Windows
+if not IS_WINDOWS:
 
     @torch._dynamo.optimize(count_bytes_inductor)
     def f(x):
diff --git a/test/profiler/test_profiler.py b/test/profiler/test_profiler.py
index bbd04d81aa1a..452f13868844 100644
--- a/test/profiler/test_profiler.py
+++ b/test/profiler/test_profiler.py
@@ -487,7 +487,6 @@ class TestExecutionTrace(TestCase):
         assert loop_count == expected_loop_events
 
     @unittest.skipIf(IS_WINDOWS, 'torch.compile does not support WINDOWS')
-    @unittest.skipIf(sys.version_info >= (3, 12), "torch.compile is not supported on python 3.12+")
     def test_execution_trace_with_pt2(self):
         class ConvAndRelu(nn.Module):
diff --git a/test/quantization/core/experimental/test_float8.py b/test/quantization/core/experimental/test_float8.py
index 3d8b7840038c..1f735f29e322 100644
--- a/test/quantization/core/experimental/test_float8.py
+++ b/test/quantization/core/experimental/test_float8.py
@@ -1,6 +1,5 @@
 # Owner(s): ["oncall: quantization"]
 
-import sys
 import unittest
 
 import torch
@@ -288,9 +287,6 @@ class TestFloat8DtypeCPUOnly(TestCase):
         self.assertEqual(mul8, mul8_simulated)
 
     @unittest.skipIf(IS_WINDOWS, "torch.compile not supported on Windows yet")
-    @unittest.skipIf(
-        sys.version_info >= (3, 12), "torch.compile is not supported on python 3.12+"
-    )
     @dtypes(*CUDA_FLOAT8_DTYPES)
     def test_pt2_traceable_aot_eager(self, dtype):
         @torch.compile(backend="aot_eager", fullgraph=True)
diff --git a/test/quantization/core/test_quantized_op.py b/test/quantization/core/test_quantized_op.py
index efe2afb4d135..e1eed794c947 100644
--- a/test/quantization/core/test_quantized_op.py
+++ b/test/quantization/core/test_quantized_op.py
@@ -6,7 +6,6 @@
 import itertools
 import numpy as np
 import operator
 import random
-import sys
 import unittest
 from typing import NamedTuple, List
@@ -3401,9 +3400,6 @@ class TestDynamicQuantizedOps(TestCase):
 
         self.assertEqual(out, ref)
 
-    @unittest.skipIf(
-        sys.version_info >= (3, 12), "torch.compile is not supported on python 3.12+"
-    )
     @skipIfNoFBGEMM
     def test_wrapped_fbgemm_pack_gemm_matrix_fp16_pt2_compliant(self):
         # We are not using opcheck over here because the output for the op we're testing
diff --git a/test/quantization/pt2e/test_duplicate_dq.py b/test/quantization/pt2e/test_duplicate_dq.py
index 97073665de57..579a16ff766e 100644
--- a/test/quantization/pt2e/test_duplicate_dq.py
+++ b/test/quantization/pt2e/test_duplicate_dq.py
@@ -1,6 +1,5 @@
 # Owner(s): ["oncall: quantization"]
 import copy
-import sys
 import unittest
 from typing import Any, Dict
 
@@ -92,9 +91,6 @@ _DEQUANTIZE_OPS = [
 
 
 @unittest.skipIf(IS_WINDOWS, "Windows not yet supported for torch.compile")
-@unittest.skipIf(
-    sys.version_info >= (3, 12), "torch.compile is not supported on python 3.12+"
-)
 class TestDuplicateDQPass(QuantizationTestCase):
     def _test_duplicate_dq(
         self,
diff --git a/test/quantization/pt2e/test_generate_numeric_debug_handle.py b/test/quantization/pt2e/test_generate_numeric_debug_handle.py
index 9092dff134a4..2427ff2bb170 100644
--- a/test/quantization/pt2e/test_generate_numeric_debug_handle.py
+++ b/test/quantization/pt2e/test_generate_numeric_debug_handle.py
@@ -1,6 +1,5 @@
 # Owner(s): ["oncall: quantization"]
 
-import sys
 import unittest
 
 import torch
@@ -65,9 +64,6 @@ def _extract_conv2d_pattern_debug_handle_map(model):
 
 
 @unittest.skipIf(IS_WINDOWS, "Windows not yet supported for torch.compile")
-@unittest.skipIf(
-    sys.version_info >= (3, 12), "torch.compile is not supported on python 3.12+"
-)
 class TestGenerateNumericDebugHandle(TestCase):
     def test_simple(self):
         m = TestHelperModules.Conv2dThenConv1d()
diff --git a/test/quantization/pt2e/test_graph_utils.py b/test/quantization/pt2e/test_graph_utils.py
index ab5171ece80a..a20338a97e82 100644
--- a/test/quantization/pt2e/test_graph_utils.py
+++ b/test/quantization/pt2e/test_graph_utils.py
@@ -1,7 +1,6 @@
 # Owner(s): ["oncall: quantization"]
 import copy
 import unittest
-import sys
 
 import torch
 import torch._dynamo as torchdynamo
@@ -20,7 +19,6 @@ from torch.testing._internal.common_utils import (
 
 class TestGraphUtils(TestCase):
     @unittest.skipIf(IS_WINDOWS, "torch.compile is not supported on Windows")
-    @unittest.skipIf(sys.version_info >= (3, 12), "torch.compile is not supported on python 3.12+")
     def test_conv_bn_conv_relu(self):
         class M(torch.nn.Module):
             def __init__(self):
@@ -67,7 +65,6 @@ class TestGraphUtils(TestCase):
         self.assertRaises(ValueError, x)
 
     @unittest.skipIf(IS_WINDOWS, "torch.compile is not supported on Windows")
-    @unittest.skipIf(sys.version_info >= (3, 12), "torch.compile is not supported on python 3.12+")
     def test_conv_bn_relu(self):
         class M(torch.nn.Module):
             def __init__(self):
@@ -103,7+100,6 @@ class TestGraphUtils(TestCase):
         self.assertEqual(len(fused_partitions), 0)
 
     @unittest.skipIf(IS_WINDOWS, "torch.compile is not supported on Windows")
-    @unittest.skipIf(sys.version_info >= (3, 12), "torch.compile is not supported on python 3.12+")
     def test_customized_equivalet_types_dict(self):
         class M(torch.nn.Module):
             def __init__(self):
diff --git a/test/quantization/pt2e/test_metadata_porting.py b/test/quantization/pt2e/test_metadata_porting.py
index ebb7236cfe64..40bb1f255541 100644
--- a/test/quantization/pt2e/test_metadata_porting.py
+++ b/test/quantization/pt2e/test_metadata_porting.py
@@ -1,6 +1,5 @@
 # Owner(s): ["oncall: quantization"]
 import copy
-import sys
 import unittest
 from typing import List
 
@@ -61,9 +60,6 @@ _QUANT_OPS = {
 
 # TODO: rename to TestPortMetadataPass to align with the util name?
 @unittest.skipIf(IS_WINDOWS, "Windows not yet supported for torch.compile")
-@unittest.skipIf(
-    sys.version_info >= (3, 12), "torch.compile is not supported on python 3.12+"
-)
 class TestMetaDataPorting(QuantizationTestCase):
     def _test_quant_tag_preservation_through_decomp(
         self, model, example_inputs, from_node_to_tags
diff --git a/test/quantization/pt2e/test_quantize_pt2e.py b/test/quantization/pt2e/test_quantize_pt2e.py
index c95f1a300ea0..30ceb80c39fc 100644
--- a/test/quantization/pt2e/test_quantize_pt2e.py
+++ b/test/quantization/pt2e/test_quantize_pt2e.py
@@ -1,7 +1,5 @@
 # Owner(s): ["oncall: quantization"]
 from typing import List, Tuple
-import sys
-import unittest
 
 import torch
 from torch._export import (
@@ -63,7 +61,6 @@ from torch.testing._internal.common_utils import (
 
 
 @skipIfNoQNNPACK
-@unittest.skipIf(sys.version_info >= (3, 12), "torch.compile is not supported on python 3.12+")
 class TestQuantizePT2E(PT2EQuantizationTestCase):
     def test_simple_quantizer(self):
         # TODO: use OP_TO_ANNOTATOR
diff --git a/test/quantization/pt2e/test_quantize_pt2e_qat.py b/test/quantization/pt2e/test_quantize_pt2e_qat.py
index 75f5ad0b0ed5..b635c5986d7a 100644
--- a/test/quantization/pt2e/test_quantize_pt2e_qat.py
+++ b/test/quantization/pt2e/test_quantize_pt2e_qat.py
@@ -1,7 +1,6 @@
 # Owner(s): ["oncall: quantization"]
 import copy
 import operator
-import sys
 import unittest
 from typing import Any, Optional, Tuple, Type
 
@@ -765,9 +764,6 @@ class TestQuantizePT2EQAT_ConvBn_Base(PT2EQATTestCase):
 
 # TODO: enable this in the next PR
 @skipIfNoQNNPACK
-@unittest.skipIf(
-    sys.version_info >= (3, 12), "torch.compile is not supported on python 3.12+"
-)
 class TestQuantizePT2EQAT_ConvBn1d(TestQuantizePT2EQAT_ConvBn_Base):
     dim = 1
     example_inputs = (torch.randn(1, 3, 5),)
@@ -776,9 +772,6 @@ class TestQuantizePT2EQAT_ConvBn1d(TestQuantizePT2EQAT_ConvBn_Base):
 
 
 @skipIfNoQNNPACK
-@unittest.skipIf(
-    sys.version_info >= (3, 12), "torch.compile is not supported on python 3.12+"
-)
 class TestQuantizePT2EQAT_ConvBn2d(TestQuantizePT2EQAT_ConvBn_Base):
     dim = 2
     example_inputs = (torch.randn(1, 3, 5, 5),)
@@ -906,9 +899,6 @@ class ConvBnDerivedBiasQuantizer(Quantizer):
 
 
 @skipIfNoQNNPACK
-@unittest.skipIf(
-    sys.version_info >= (3, 12), "torch.compile is not supported on python 3.12+"
-)
 class TestQuantizePT2EQATModels(PT2EQATTestCase):
     @skip_if_no_torchvision
     @skipIfNoQNNPACK
diff --git a/test/quantization/pt2e/test_representation.py b/test/quantization/pt2e/test_representation.py
index bee1bd96b73d..b4273686c18f 100644
--- a/test/quantization/pt2e/test_representation.py
+++ b/test/quantization/pt2e/test_representation.py
@@ -1,7 +1,5 @@
 # Owner(s): ["oncall: quantization"]
 import copy
-import sys
-import unittest
 from typing import Any, Dict, Tuple
 
 import torch
@@ -22,9 +20,6 @@ from torch.testing._internal.common_quantization import (
 
 
 @skipIfNoQNNPACK
-@unittest.skipIf(
-    sys.version_info >= (3, 12), "torch.compile is not supported on python 3.12+"
-)
 class TestPT2ERepresentation(QuantizationTestCase):
     def _test_representation(
         self,
diff --git a/test/quantization/pt2e/test_xnnpack_quantizer.py b/test/quantization/pt2e/test_xnnpack_quantizer.py
index 9cec1159cd16..621e6456ebd2 100644
--- a/test/quantization/pt2e/test_xnnpack_quantizer.py
+++ b/test/quantization/pt2e/test_xnnpack_quantizer.py
@@ -1,8 +1,6 @@
 # Owner(s): ["oncall: mobile"]
 import copy
 import operator
-import sys
-import unittest
 
 import torch
 import torch._dynamo as torchdynamo
@@ -44,9 +42,6 @@ from torch.testing._internal.common_quantized import override_quantized_engine
 
 
 @skipIfNoQNNPACK
-@unittest.skipIf(
-    sys.version_info >= (3, 12), "torch.compile is not supported on python 3.12+"
-)
 class TestXNNPACKQuantizer(PT2EQuantizationTestCase):
     def test_conv1d(self):
         quantizer = XNNPACKQuantizer()
@@ -990,9 +985,6 @@ class TestXNNPACKQuantizer(PT2EQuantizationTestCase):
 
 
 # TODO: express this using self._test_quantizer, add test for inception_v4
-@unittest.skipIf(
-    sys.version_info >= (3, 12), "torch.compile is not supported on python 3.12+"
-)
 class TestXNNPACKQuantizerModels(PT2EQuantizationTestCase):
     @skip_if_no_torchvision
     @skipIfNoQNNPACK
diff --git a/test/test_binary_ufuncs.py b/test/test_binary_ufuncs.py
index 55ead3684cc1..acda078fe03b 100644
--- a/test/test_binary_ufuncs.py
+++ b/test/test_binary_ufuncs.py
@@ -3,7 +3,6 @@
 import torch
 import numpy as np
 
-import sys
 import itertools
 from itertools import chain
 from itertools import product
@@ -3886,10 +3885,6 @@ class TestBinaryUfuncs(TestCase):
         test_x((2, 3), 1, [1.0, 2.0, 3.0, 4.0], device)
 
     @skipIf(not TEST_SCIPY, "Scipy required for the test.")
-    # This is failing on Python 3.12. https://github.com/pytorch/pytorch/issues/119462
-    @skipIf(
-        sys.version_info >= (3, 12), "Failing on Python 3.12"
-    )
     def test_cumulative_trapezoid(self, device):
         import scipy.integrate
diff --git a/test/test_custom_ops.py b/test/test_custom_ops.py
index 7486d941ebe4..901bf50c9704 100644
--- a/test/test_custom_ops.py
+++ b/test/test_custom_ops.py
@@ -7,7 +7,6 @@ import collections
 import itertools
 import os
 import re
-import sys
 import typing
 
 import torch._custom_ops as custom_ops
@@ -27,9 +26,6 @@ import numpy as np
 
 def requires_compile(fun):
     fun = unittest.skipIf(IS_WINDOWS, "torch.compile doesn't work with windows")(fun)
-    fun = unittest.skipIf(
-        sys.version_info >= (3, 12), "torch.compile is not supported on python 3.12+"
-    )(fun)
     return fun
 
@@ -1567,9 +1563,6 @@ def forward(self, x_1):
         )
 
     @unittest.skipIf(IS_WINDOWS, "torch.compile doesn't work on windows")
-    @unittest.skipIf(
-        sys.version_info >= (3, 12), "torch.compile is not supported on python 3.12+"
-    )
     def test_data_dependent_compile(self):
         import torch._dynamo.testing
         from torch._dynamo.utils import counters
@@ -2115,9 +2108,6 @@ class TestCustomOpAPI(TestCase):
         self.assertTrue(cpu_called)
 
     @skipIfTorchDynamo("Expected to fail due to no FakeTensor support; not a bug")
-    @unittest.skipIf(
-        sys.version_info >= (3, 12), "torch.compile is not supported on python 3.12+"
-    )
     def test_fake(self):
         @torch.library.custom_op("_torch_testing::add", mutated_args=())
         def add(x: Tensor, y: float) -> Tensor:
@@ -2174,9 +2164,6 @@ Please use `add.register_fake` to add an fake impl.""",
 
     @skipIfTorchDynamo("recursive dynamo")
     @unittest.skipIf(IS_WINDOWS, "torch.compile doesn't work on windows")
-    @unittest.skipIf(
-        sys.version_info >= (3, 12), "torch.compile is not supported on python 3.12+"
-    )
     def test_compile(self):
         called_impl = False
         called_abstract = False
diff --git a/test/test_decomp.py b/test/test_decomp.py
index 4361895e15ae..4e482a92d5eb 100644
--- a/test/test_decomp.py
+++ b/test/test_decomp.py
@@ -39,7 +39,6 @@
 import itertools
 import functools
 from functools import partial
 import unittest
-import sys
 
 aten = torch.ops.aten
@@ -1047,7 +1046,6 @@ class HasDecompTest(TestCase):
         self.assertExpected("".join(sorted(op.name() + "\n" for op in core_aten_ops)))
 
     @unittest.skipIf(IS_WINDOWS, "torch.compile not supported on windows")
-    @unittest.skipIf(sys.version_info >= (3, 12), "torch.compile is not supported on python 3.12+")
     def test_compile_rrelu(self):
         def f(x):
             return torch.rrelu(x)
diff --git a/test/test_fake_tensor.py b/test/test_fake_tensor.py
index fde35208364c..e31e22349f6a 100644
--- a/test/test_fake_tensor.py
+++ b/test/test_fake_tensor.py
@@ -1,6 +1,5 @@
 # Owner(s): ["module: meta tensors"]
 
-import sys
 from torch.testing._internal.common_utils import (
     TestCase, TEST_WITH_TORCHDYNAMO, run_tests, skipIfCrossRef, skipIfRocm, skipIfTorchDynamo, parametrize,
@@ -772,9 +771,6 @@ class FakeTensorTest(TestCase):
         grad_in = torch.ops.aten._adaptive_avg_pool2d_backward(grad_out, inp)
         self.assertTrue(torch._prims_common.suggest_memory_format(grad_in) == torch.channels_last)
 
-    @unittest.skipIf(
-        sys.version_info >= (3, 12), "torch.compile is not supported on python 3.12+"
-    )
     def test_export_numpy(self):
         class MyNumpyModel(torch.nn.Module):
             def forward(self, input):
diff --git a/test/test_linalg.py b/test/test_linalg.py
index 51c70817d605..0eb5d953fa22 100644
--- a/test/test_linalg.py
+++ b/test/test_linalg.py
@@ -7,7 +7,6 @@ import unittest
 import itertools
 import warnings
 import math
-import sys
 from math import inf, nan, isnan
 import random
 from random import randrange
@@ -6027,9 +6026,6 @@ scipy_lobpcg  | {eq_err_scipy:10.2e}  | {eq_err_general_scipy:10.2e}  | {iters2:
         if TEST_WITH_ROCM:
             self.skipTest("_int4_mm not compiled for ROCM")
 
-        if sys.version_info >= (3, 12):
-            self.skipTest("Dynamo is not supported on Python 3.12+")
-
         q_group = 32
         inner_k_tiles = 2
@@ -6119,9 +6115,6 @@ scipy_lobpcg  | {eq_err_scipy:10.2e}  | {eq_err_general_scipy:10.2e}  | {iters2:
     @parametrize("k", [32, 64])
     @parametrize("n", [48, 64])
     def test_compile_int8_mm(self, device, m, k, n):
-        if sys.version_info >= (3, 12):
-            self.skipTest("Dynamo is not supported on Python 3.12+")
-
         torch.manual_seed(1)
         a = torch.rand((m, k), dtype=torch.bfloat16, device=device)
         b = torch.rand((n, k), dtype=torch.bfloat16, device=device)
diff --git a/test/test_nestedtensor.py b/test/test_nestedtensor.py
index 0c435ce723c3..a2daad1afb1d 100644
--- a/test/test_nestedtensor.py
+++ b/test/test_nestedtensor.py
@@ -4184,7 +4184,6 @@ class TestNestedTensorSubclass(TestCase):
 
     @skipIfTorchDynamo("SDPA test compiles internally")
     @unittest.skipIf(IS_WINDOWS, reason="Windows not yet supported for torch.compile")
-    @unittest.skipIf(sys.version_info >= (3, 12), "torch.compile is not supported on python 3.12+")
     @skipCUDAIf(not SM70OrLater, "GPU capability is < SM70")
     # mha_varlen_fwd not supported on ROCm
     @skipCUDAIfRocm
diff --git a/test/test_sparse_semi_structured.py b/test/test_sparse_semi_structured.py
index aa9e0a97e168..fcb316ee3019 100644
--- a/test/test_sparse_semi_structured.py
+++ b/test/test_sparse_semi_structured.py
@@ -2,7 +2,6 @@
 import itertools
 import random
 import unittest
-import sys
 
 import torch
 from torch import nn
@@ -197,7 +196,6 @@ class SparseSemiStructuredTensorCompileTest(torch._dynamo.test_case.TestCase):
         assert sparse_result.stride() == sparse_compile_result.stride()
 
     @unittest.skipIf(IS_WINDOWS, "torch.compile not supported on windows")
-    @unittest.skipIf(sys.version_info >= (3, 12), "torch.compile is not supported on python 3.12+")
     @unittest.skipIf("cusparselt" not in SEMI_STRUCTURED_SUPPORTED_BACKENDS, "cusparselt not supported on this machine")
     def test_mlp_contiguous_relu_compile_cusparselt(self):
         """
@@ -207,7 +205,6 @@ class SparseSemiStructuredTensorCompileTest(torch._dynamo.test_case.TestCase):
         SparseSemiStructuredTensorCompileTest._test_mlp_contiguous_relu_compile("cusparselt", dense_input_shape)
 
     @unittest.skipIf(IS_WINDOWS, "torch.compile not supported on windows")
-    @unittest.skipIf(sys.version_info >= (3, 12), "torch.compile is not supported on python 3.12+")
     def test_mlp_contiguous_relu_compile_cutlass(self):
         """
         test for CUTLASS meta registrations (_sparse_semi_structured_linear) + torch.compile
diff --git a/test/test_transformers.py b/test/test_transformers.py
index b1678c696ae1..af32521fc507 100644
--- a/test/test_transformers.py
+++ b/test/test_transformers.py
@@ -3426,9 +3426,6 @@ class TestAttnBias(NNTestCase):
         [(16, 16, 128, 128, 16), (16, 16, 128, 256, 32), (16, 16, 256, 128, 32), (1, 1, 23, 56, 15)],
     )
     @unittest.skipIf(IS_WINDOWS, "torch.compile is not supported on windows")
-    @unittest.skipIf(
-        sys.version_info >= (3, 12), "torch.compile is not supported on python 3.12+"
-    )
     @skipIfTorchDynamo("This function already calls torch.compile.")
     def test_causal_variants_compile(self, device, causal_variant: CausalVariant, shape: List[Tuple[int]]):
         cnts = CompileCounterWithBackend("aot_eager")
diff --git a/tools/dynamo/verify_dynamo.py b/tools/dynamo/verify_dynamo.py
index ba2e59e557e2..e62d74043c7c 100644
--- a/tools/dynamo/verify_dynamo.py
+++ b/tools/dynamo/verify_dynamo.py
@@ -215,8 +215,8 @@ def main():
         f"ROCM version: {rocm_ver}\n"
     )
     for args in _SANITY_CHECK_ARGS:
-        if sys.version_info >= (3, 12):
-            warnings.warn("Dynamo not yet supported in Python 3.12. Skipping check.")
+        if sys.version_info >= (3, 13):
+            warnings.warn("Dynamo not yet supported in Python 3.13. Skipping check.")
             continue
         check_dynamo(*args)
     print("All required checks passed")