diff --git a/test/dynamo/test_activation_checkpointing.py b/test/dynamo/test_activation_checkpointing.py
index 245f905cbef7..db1ca7e33533 100644
--- a/test/dynamo/test_activation_checkpointing.py
+++ b/test/dynamo/test_activation_checkpointing.py
@@ -7,13 +7,11 @@ from importlib import import_module
 
 import torch
 import torch._dynamo.config
-
 import torch._dynamo.test_case
 import torch._functorch.config
 import torch.distributed as dist
 import torch.nn as nn
 import torch.utils.checkpoint
-
 from functorch.compile import min_cut_rematerialization_partition
 from torch._dynamo.backends.common import aot_autograd
 from torch._dynamo.testing import CompileCounterWithBackend
@@ -31,6 +29,7 @@ from torch.utils.checkpoint import (
     create_selective_checkpoint_contexts,
 )
 
+
 requires_cuda = unittest.skipUnless(HAS_CUDA, "requires cuda")
 requires_distributed = functools.partial(
     unittest.skipIf, not dist.is_available(), "requires distributed"
diff --git a/test/dynamo/test_after_aot.py b/test/dynamo/test_after_aot.py
index fb2762151d55..1f8425a3ede7 100644
--- a/test/dynamo/test_after_aot.py
+++ b/test/dynamo/test_after_aot.py
@@ -8,9 +8,7 @@ import tempfile
 import unittest
 
 import torch._dynamo.test_case
-
 from torch._dynamo.repro.after_aot import InputReader, InputWriter, save_graph_repro
-
 from torch.fx.experimental.proxy_tensor import make_fx
 from torch.testing._internal.common_utils import IS_FBCODE
 from torch.utils._traceback import report_compile_source_on_error
diff --git a/test/dynamo/test_aot_autograd.py b/test/dynamo/test_aot_autograd.py
index 7e80df205a26..049f4eefdd69 100644
--- a/test/dynamo/test_aot_autograd.py
+++ b/test/dynamo/test_aot_autograd.py
@@ -6,7 +6,6 @@ from textwrap import dedent
 from unittest.mock import patch
 
 import torch
-
 import torch._dynamo
 import torch._dynamo.test_case
 import torch.fx.traceback as fx_traceback
diff --git a/test/dynamo/test_aot_autograd_cache.py b/test/dynamo/test_aot_autograd_cache.py
index d99fb17bbbe0..d5b3641bbf99 100644
--- a/test/dynamo/test_aot_autograd_cache.py
+++ b/test/dynamo/test_aot_autograd_cache.py
@@ -7,7 +7,6 @@ from unittest.mock import patch
 import torch
 import torch._dynamo
 import torch._dynamo.test_case
-
 import torch._functorch._aot_autograd
 from torch._dynamo import config as dynamo_config
 from torch._dynamo.utils import counters
diff --git a/test/dynamo/test_autograd_function.py b/test/dynamo/test_autograd_function.py
index 333438757aee..d7cf6bb45668 100644
--- a/test/dynamo/test_autograd_function.py
+++ b/test/dynamo/test_autograd_function.py
@@ -2,16 +2,15 @@
 # flake8: noqa: B950
 import copy
 import math
-
 from dataclasses import dataclass
 
 import torch
-
 import torch._dynamo.test_case
 import torch._dynamo.testing
 import torch._dynamo.utils
 from torch.testing._internal.triton_utils import HAS_CUDA, requires_cuda
 
+
 if HAS_CUDA:
     import triton
 
diff --git a/test/dynamo/test_backends.py b/test/dynamo/test_backends.py
index ca935ea69bc8..522bf4e1f581 100644
--- a/test/dynamo/test_backends.py
+++ b/test/dynamo/test_backends.py
@@ -2,7 +2,6 @@
 import unittest
 
 import torch
-
 import torch._dynamo
 import torch._dynamo.test_case
 from torch._dynamo.backends.debugging import ExplainWithBackend
@@ -12,6 +11,7 @@ from torch._dynamo.testing import same
 from torch.fx._lazy_graph_module import _force_skip_lazy_graph_module
 from torch.testing._internal.inductor_utils import HAS_CUDA
 
+
 requires_cuda = unittest.skipUnless(HAS_CUDA, "requires cuda")
 
 
diff --git a/test/dynamo/test_backward_higher_order_ops.py b/test/dynamo/test_backward_higher_order_ops.py
index b6d44daa5cd0..619bb0920eb3 100644
--- a/test/dynamo/test_backward_higher_order_ops.py
+++ b/test/dynamo/test_backward_higher_order_ops.py
@@ -4,7 +4,6 @@
 import functools
 
 import torch
-
 import torch._dynamo.test_case
 import torch._dynamo.testing
 import torch._dynamo.utils
diff --git a/test/dynamo/test_base_output.py b/test/dynamo/test_base_output.py
index 0db9c7d0cfa7..1ca530d96dc2 100644
--- a/test/dynamo/test_base_output.py
+++ b/test/dynamo/test_base_output.py
@@ -2,11 +2,11 @@
 import unittest.mock
 
 import torch
-
 import torch._dynamo.test_case
 import torch._dynamo.testing
 from torch._dynamo.testing import same
 
+
 try:
     from diffusers.models import unet_2d
 except ImportError:
diff --git a/test/dynamo/test_comptime.py b/test/dynamo/test_comptime.py
index a14c889a3bce..3d9ce8f85cb6 100644
--- a/test/dynamo/test_comptime.py
+++ b/test/dynamo/test_comptime.py
@@ -9,6 +9,7 @@ import torch._dynamo.test_case
 import torch._dynamo.testing
 from torch._dynamo.comptime import comptime
 
+
 # Because we don't support free variables in comptime at the moment,
 # we have to communicate via globals. This also means these tests cannot
 # be run in parallel in a single process (not that you'd... ever want
diff --git a/test/dynamo/test_config.py b/test/dynamo/test_config.py
index 05e941118dfa..33149d5831fb 100644
--- a/test/dynamo/test_config.py
+++ b/test/dynamo/test_config.py
@@ -5,6 +5,7 @@ import torch._dynamo.test_case
 import torch._dynamo.testing
 from torch._dynamo.utils import disable_cache_limit
 
+
 # NB: do NOT include this test class in test_dynamic_shapes.py
 
 
diff --git a/test/dynamo/test_ctx_manager.py b/test/dynamo/test_ctx_manager.py
index 47f8e8eeb863..2a247394dace 100644
--- a/test/dynamo/test_ctx_manager.py
+++ b/test/dynamo/test_ctx_manager.py
@@ -2,12 +2,10 @@
 import unittest
 
 import torch
-
 import torch._dynamo.test_case
 import torch._dynamo.testing
 import torch.onnx.operators
 from torch._dynamo.testing import EagerAndRecordGraphs, normalize_gm, same
-
 from torch.nn import functional as F
 from torch.testing._internal.common_cuda import PLATFORM_SUPPORTS_FLASH_ATTENTION
 from torch.testing._internal.common_utils import TEST_WITH_ROCM
diff --git a/test/dynamo/test_cudagraphs.py b/test/dynamo/test_cudagraphs.py
index be0d9e2f9b51..a83d61267e74 100644
--- a/test/dynamo/test_cudagraphs.py
+++ b/test/dynamo/test_cudagraphs.py
@@ -4,7 +4,6 @@ import functools
 import unittest
 
 import torch
-
 import torch._dynamo
 import torch._dynamo.config
 import torch._dynamo.test_case
diff --git a/test/dynamo/test_cudagraphs_expandable_segments.py b/test/dynamo/test_cudagraphs_expandable_segments.py
index e10139333a4f..fe8d23dc82a7 100644
--- a/test/dynamo/test_cudagraphs_expandable_segments.py
+++ b/test/dynamo/test_cudagraphs_expandable_segments.py
@@ -6,20 +6,22 @@ import pathlib
 import sys
 
 import torch
-
 from torch.testing._internal.common_cuda import IS_JETSON, IS_WINDOWS
 from torch.testing._internal.common_utils import run_tests
 
+
 pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
 sys.path.append(pytorch_test_dir)
 
 from dynamo.test_cudagraphs import TestAotCudagraphs  # noqa: F401
 
+
 REPO_ROOT = pathlib.Path(__file__).resolve().parent.parent.parent
 sys.path.insert(0, str(REPO_ROOT))
 
 from tools.stats.import_test_stats import get_disabled_tests
 
+
 # Make sure to remove REPO_ROOT after import is done
 sys.path.remove(str(REPO_ROOT))
 
diff --git a/test/dynamo/test_debug_utils.py b/test/dynamo/test_debug_utils.py
index 0c305f800e58..d4622c6e601e 100644
--- a/test/dynamo/test_debug_utils.py
+++ b/test/dynamo/test_debug_utils.py
@@ -3,13 +3,13 @@
 import unittest
 
 import torch
-
 from functorch import make_fx
 from torch._dynamo import debug_utils
 from torch._dynamo.debug_utils import aot_graph_input_parser
 from torch._dynamo.test_case import TestCase
 from torch.testing._internal.inductor_utils import HAS_CUDA
 
+
 requires_cuda = unittest.skipUnless(HAS_CUDA, "requires cuda")
 
 f32 = torch.float32
diff --git a/test/dynamo/test_decorators.py b/test/dynamo/test_decorators.py
index 6b74951ec2cb..05065c9c20eb 100644
--- a/test/dynamo/test_decorators.py
+++ b/test/dynamo/test_decorators.py
@@ -5,7 +5,6 @@ import unittest.mock as mock
 from unittest.mock import patch
 
 import torch
-
 import torch._dynamo.test_case
 import torch._dynamo.testing
 from torch._dynamo.exc import IncorrectUsage
diff --git a/test/dynamo/test_deviceguard.py b/test/dynamo/test_deviceguard.py
index bd3c73a7b578..2d1d267c9379 100644
--- a/test/dynamo/test_deviceguard.py
+++ b/test/dynamo/test_deviceguard.py
@@ -3,7 +3,6 @@ import unittest
 from unittest.mock import Mock
 
 import torch
-
 import torch._dynamo.test_case
 import torch._dynamo.testing
 from torch._dynamo.device_interface import CudaInterface, DeviceGuard
diff --git a/test/dynamo/test_dynamic_shapes.py b/test/dynamo/test_dynamic_shapes.py
index a3c63ef66152..6d3537fe6618 100644
--- a/test/dynamo/test_dynamic_shapes.py
+++ b/test/dynamo/test_dynamic_shapes.py
@@ -7,6 +7,7 @@ from torch._dynamo.testing import make_test_cls_with_patches
 from torch.fx.experimental import _config as fx_config
 from torch.testing._internal.common_utils import slowTest, TEST_Z3
 
+
 try:
     from . import (
         test_aot_autograd,
@@ -27,6 +28,7 @@ except ImportError:
     import test_functions
     import test_higher_order_ops
     import test_misc
+
     import test_modules
     import test_repros
     import test_sdpa
diff --git a/test/dynamo/test_exceptions.py b/test/dynamo/test_exceptions.py
index efb5375ebf7c..7a63ed776aad 100644
--- a/test/dynamo/test_exceptions.py
+++ b/test/dynamo/test_exceptions.py
@@ -2,7 +2,6 @@
 
 import torch
 import torch._dynamo.config
-
 import torch._dynamo.test_case
 import torch._functorch.config
 import torch.utils.checkpoint
diff --git a/test/dynamo/test_export.py b/test/dynamo/test_export.py
index d882224797ac..aeffdf0a92d9 100644
--- a/test/dynamo/test_export.py
+++ b/test/dynamo/test_export.py
@@ -17,7 +17,6 @@ import torch
 import torch._dynamo
 import torch._dynamo.test_case
 import torch._dynamo.testing
-
 from functorch.experimental.control_flow import cond
 from torch._dynamo import config
 from torch._dynamo.exc import UserError
diff --git a/test/dynamo/test_frame_init.py b/test/dynamo/test_frame_init.py
index 6e3963aa109f..73f3f57dc58e 100644
--- a/test/dynamo/test_frame_init.py
+++ b/test/dynamo/test_frame_init.py
@@ -4,6 +4,7 @@ import torch
 import torch._dynamo.test_case
 from torch._guards import CompileId
 
+
 set_eval_frame = torch._C._dynamo.eval_frame.set_eval_frame  # noqa: F401
 
 
diff --git a/test/dynamo/test_functions.py b/test/dynamo/test_functions.py
index 22b77ed75132..fc23ffc159d9 100644
--- a/test/dynamo/test_functions.py
+++ b/test/dynamo/test_functions.py
@@ -16,7 +16,6 @@ from unittest.mock import patch
 import numpy as np
 
 import torch
-
 import torch._dynamo.test_case
 import torch._dynamo.testing
 from torch import sub
@@ -28,7 +27,6 @@ from torch._dynamo.testing import (
 from torch._dynamo.utils import ifdynstaticdefault, same
 from torch._dynamo.variables import ConstantVariable
 from torch._dynamo.variables.lists import RangeVariable
-
 from torch.nn import functional as F
 from torch.testing._internal.common_utils import (
     disable_translation_validation_if_dynamic_shapes,
@@ -39,6 +37,7 @@ from torch.testing._internal.common_utils import (
 # Defines all the kernels for tests
 from torch.testing._internal.triton_utils import *  # noqa: F403
 
+
 d = torch.ones(10, 10)
 e = torch.nn.Linear(10, 10)
 flag = True
diff --git a/test/dynamo/test_fx_passes_pre_grad.py b/test/dynamo/test_fx_passes_pre_grad.py
index 829e54951c83..1edd0b9fdd57 100644
--- a/test/dynamo/test_fx_passes_pre_grad.py
+++ b/test/dynamo/test_fx_passes_pre_grad.py
@@ -2,7 +2,6 @@
 from unittest import mock
 
 import torch
-
 import torch._dynamo
 import torch._dynamo.test_case
 from torch._inductor.utils import pass_execution_and_save
diff --git a/test/dynamo/test_global.py b/test/dynamo/test_global.py
index f265bb9b1c78..35cb07132507 100644
--- a/test/dynamo/test_global.py
+++ b/test/dynamo/test_global.py
@@ -1,10 +1,10 @@
 # Owner(s): ["module: dynamo"]
 import torch
-
 import torch._dynamo.test_case
 import torch._dynamo.testing
 from torch._dynamo.testing import same
 
+
 try:
     from . import utils
 except ImportError:
diff --git a/test/dynamo/test_guard_manager.py b/test/dynamo/test_guard_manager.py
index 5e009114786a..1dbd542c5766 100644
--- a/test/dynamo/test_guard_manager.py
+++ b/test/dynamo/test_guard_manager.py
@@ -9,6 +9,7 @@ from torch._C._dynamo import guards
 from torch._dynamo.convert_frame import GlobalStateGuard
 from torch.testing._internal.common_utils import set_default_dtype
 
+
 RootGuardManager = guards.RootGuardManager
 DictGuardManager = guards.DictGuardManager
 DictSubclassGuardManager = guards.DictSubclassGuardManager
diff --git a/test/dynamo/test_higher_order_ops.py b/test/dynamo/test_higher_order_ops.py
index 8637fe26d9e6..a6ac858ce79c 100644
--- a/test/dynamo/test_higher_order_ops.py
+++ b/test/dynamo/test_higher_order_ops.py
@@ -7,10 +7,8 @@ import unittest
 import warnings
 
 import functorch.experimental.control_flow as control_flow
-
 import torch
 import torch._dynamo.config as config
-
 import torch._dynamo.test_case
 import torch._functorch.config
 import torch.nn as nn
diff --git a/test/dynamo/test_hooks.py b/test/dynamo/test_hooks.py
index c506dccef8b9..168dddbced62 100644
--- a/test/dynamo/test_hooks.py
+++ b/test/dynamo/test_hooks.py
@@ -8,7 +8,6 @@ import torch
 import torch._dynamo
 import torch._dynamo.test_case
 import torch._dynamo.testing
-
 from functorch.compile import nop
 from torch._dynamo import compiled_autograd
 from torch._functorch.aot_autograd import aot_module_simplified
diff --git a/test/dynamo/test_inline_inbuilt_nn_modules.py b/test/dynamo/test_inline_inbuilt_nn_modules.py
index 60037b79775d..d950572d2bf9 100644
--- a/test/dynamo/test_inline_inbuilt_nn_modules.py
+++ b/test/dynamo/test_inline_inbuilt_nn_modules.py
@@ -4,6 +4,7 @@ import unittest
 from torch._dynamo import config
 from torch._dynamo.testing import make_test_cls_with_patches
 
+
 try:
     from . import (
         test_aot_autograd,
@@ -11,13 +12,13 @@ try:
         test_higher_order_ops,
         test_misc,
         test_modules,
-        # test_repros,
     )
 except ImportError:
     import test_aot_autograd
     import test_functions
     import test_higher_order_ops
     import test_misc
+
     import test_modules
 
 
diff --git a/test/dynamo/test_interop.py b/test/dynamo/test_interop.py
index 063e48fe8f5d..416e71d4f57f 100644
--- a/test/dynamo/test_interop.py
+++ b/test/dynamo/test_interop.py
@@ -1,6 +1,5 @@
 # Owner(s): ["module: dynamo"]
 import torch
-
 import torch._dynamo.test_case
 import torch._dynamo.testing
 import torch.onnx.operators
diff --git a/test/dynamo/test_logging.py b/test/dynamo/test_logging.py
index 6fc9608c87a4..247ffebd8932 100644
--- a/test/dynamo/test_logging.py
+++ b/test/dynamo/test_logging.py
@@ -10,9 +10,7 @@ import torch._dynamo.test_case
 import torch._dynamo.testing
 import torch.distributed as dist
 from torch._dynamo.testing import skipIfNotPy311
-
 from torch.nn.parallel import DistributedDataParallel as DDP
-
 from torch.testing._internal.common_utils import (
     find_free_port,
     munge_exc,
@@ -25,6 +23,7 @@ from torch.testing._internal.logging_utils import (
     make_settings_test,
 )
 
+
 requires_cuda = unittest.skipUnless(HAS_CUDA, "requires cuda")
 requires_distributed = functools.partial(
     unittest.skipIf, not dist.is_available(), "requires distributed"
diff --git a/test/dynamo/test_minifier.py b/test/dynamo/test_minifier.py
index 9014be6f7557..6b0ea5caed2f 100644
--- a/test/dynamo/test_minifier.py
+++ b/test/dynamo/test_minifier.py
@@ -5,6 +5,7 @@ import torch._dynamo
 from torch._dynamo.test_minifier_common import MinifierTestBase
 from torch.testing._internal.common_utils import skipIfNNModuleInlined
 
+
 requires_cuda = unittest.skipUnless(torch.cuda.is_available(), "requires cuda")
 
 
diff --git a/test/dynamo/test_misc.py b/test/dynamo/test_misc.py
index ca175ba9bd9c..3570aae3690f 100644
--- a/test/dynamo/test_misc.py
+++ b/test/dynamo/test_misc.py
@@ -28,10 +28,8 @@ import numpy as np
 
 import torch
 import torch._dynamo.testing
-
 import torch._inductor.test_case
 import torch.onnx.operators
-
 import torch.utils._pytree as pytree
 import torch.utils.cpp_extension
 from torch import Tensor
@@ -85,6 +83,7 @@ from torch.testing._internal.common_utils import (
 from torch.testing._internal.jit_utils import JitTestCase
 from torch.testing._internal.logging_utils import logs_to_string
 
+
 mytuple = collections.namedtuple("mytuple", ["a", "b", "ab"])
 
 T = typing.TypeVar("T")
diff --git a/test/dynamo/test_model_output.py b/test/dynamo/test_model_output.py
index f3fdf5c3c115..960fd12f5b2b 100644
--- a/test/dynamo/test_model_output.py
+++ b/test/dynamo/test_model_output.py
@@ -3,11 +3,11 @@ import dataclasses
 import unittest.mock
 
 import torch
-
 import torch._dynamo.test_case
 import torch._dynamo.testing
 from torch._dynamo.testing import same
 
+
 try:
     from transformers import modeling_outputs
     from transformers.configuration_utils import PretrainedConfig
diff --git a/test/dynamo/test_modules.py b/test/dynamo/test_modules.py
index b945b05adc8b..8ae1e0b8b3a8 100644
--- a/test/dynamo/test_modules.py
+++ b/test/dynamo/test_modules.py
@@ -14,7 +14,6 @@ from typing import Dict, NamedTuple, Tuple
 from unittest.mock import patch
 
 import torch
-
 import torch._dynamo.test_case
 import torch._dynamo.testing
 import torch.nn.functional as F
@@ -25,6 +24,7 @@ from torch._dynamo.testing import expectedFailureDynamic, same
 from torch.nn.modules.lazy import LazyModuleMixin
 from torch.nn.parameter import Parameter, UninitializedParameter
 
+
 try:
     from . import test_functions
 except ImportError:
diff --git a/test/dynamo/test_nops.py b/test/dynamo/test_nops.py
index c17b9528a4f8..664a0f61bf6d 100644
--- a/test/dynamo/test_nops.py
+++ b/test/dynamo/test_nops.py
@@ -1,11 +1,11 @@
 # Owner(s): ["module: dynamo"]
 import torch
-
 import torch._dynamo.test_case
 import torch._dynamo.testing
 from torch._dynamo import eval_frame
 from torch._dynamo.hooks import Hooks
 
+
 c = 10
 
 
diff --git a/test/dynamo/test_optimizers.py b/test/dynamo/test_optimizers.py
index 779dc8aa6c9c..7948aee56eb0 100644
--- a/test/dynamo/test_optimizers.py
+++ b/test/dynamo/test_optimizers.py
@@ -1,14 +1,11 @@
+# Owner(s): ["module: dynamo"]
 """
 PYTEST_DONT_REWRITE (prevents pytest from rewriting assertions, which interferes
 with test_adam in OptimizerTests)
 """
 import functools
 
-# Owner(s): ["module: dynamo"]
-
-
 import torch
-
 import torch._dynamo
 import torch._dynamo.test_case
 import torch._dynamo.testing
diff --git a/test/dynamo/test_pre_dispatch.py b/test/dynamo/test_pre_dispatch.py
index 6979b85506b6..f85099040b96 100644
--- a/test/dynamo/test_pre_dispatch.py
+++ b/test/dynamo/test_pre_dispatch.py
@@ -1,6 +1,5 @@
 # Owner(s): ["module: dynamo"]
 import torch
-
 import torch._dynamo
 import torch._dynamo.test_case
 
diff --git a/test/dynamo/test_profiler.py b/test/dynamo/test_profiler.py
index 679a475e92f7..b36fff046cbc 100644
--- a/test/dynamo/test_profiler.py
+++ b/test/dynamo/test_profiler.py
@@ -2,13 +2,10 @@
 from unittest.mock import patch
 
 import torch
-
 import torch._dynamo.test_case
 import torch._dynamo.testing
 import torch._dynamo.utils
-
 from torch._dynamo.utils import dynamo_timed
-
 from torch.testing._internal.common_utils import TemporaryFileName
 
 
diff --git a/test/dynamo/test_python_autograd.py b/test/dynamo/test_python_autograd.py
index c936e4cd77e5..defc71a97afc 100644
--- a/test/dynamo/test_python_autograd.py
+++ b/test/dynamo/test_python_autograd.py
@@ -2,11 +2,11 @@
 from typing import Callable, Dict, List, NamedTuple, Optional
 
 import torch
-
 import torch._dynamo
 from torch._dynamo.test_case import run_tests, TestCase
 from torch._dynamo.testing import CompileCounter, same
 
+
 """
 This is an example of a pure-python version of autograd implemented by
 @zdevito. It represents a rather challenging test case for TorchDynamo
diff --git a/test/dynamo/test_recompile_ux.py b/test/dynamo/test_recompile_ux.py
index 8c456842c20d..22d297735f13 100644
--- a/test/dynamo/test_recompile_ux.py
+++ b/test/dynamo/test_recompile_ux.py
@@ -3,12 +3,10 @@ import unittest
 import weakref
 
 import torch
-
 import torch._dynamo
 import torch._dynamo.config
 import torch._dynamo.test_case
 import torch._dynamo.testing
-
 import torch._logging
 from torch.testing._internal.logging_utils import kwargs_to_settings, log_settings
 
diff --git a/test/dynamo/test_recompiles.py b/test/dynamo/test_recompiles.py
index ff39d0c8052a..e05020412491 100644
--- a/test/dynamo/test_recompiles.py
+++ b/test/dynamo/test_recompiles.py
@@ -2,7 +2,6 @@
 from unittest.mock import patch
 
 import torch
-
 import torch._dynamo.test_case
 import torch._dynamo.testing
 
diff --git a/test/dynamo/test_repros.py b/test/dynamo/test_repros.py
index 4ca3daf4414a..e3c185da7ffb 100644
--- a/test/dynamo/test_repros.py
+++ b/test/dynamo/test_repros.py
@@ -27,11 +27,9 @@ from unittest import mock
 import numpy as np
 
 import torch
-
 import torch._dynamo.test_case
 import torch._dynamo.testing
 import torch._dynamo.utils
-
 import torch._functorch.config
 import torch.library
 import torch.utils._pytree as pytree
@@ -40,7 +38,6 @@ from torch._dynamo.debug_utils import same_two_models
 from torch._dynamo.testing import CompileCounter, rand_strided, same
 from torch._inductor.utils import fresh_inductor_cache
 from torch.nn import functional as F
-
 from torch.testing._internal.common_cuda import PLATFORM_SUPPORTS_FLASH_ATTENTION
 from torch.testing._internal.common_utils import (
     disable_translation_validation_if_dynamic_shapes,
diff --git a/test/dynamo/test_skip_non_tensor.py b/test/dynamo/test_skip_non_tensor.py
index 3ced7859cd7e..72153d26a1ff 100644
--- a/test/dynamo/test_skip_non_tensor.py
+++ b/test/dynamo/test_skip_non_tensor.py
@@ -2,11 +2,11 @@
 from unittest.mock import patch
 
 import torch
-
 import torch._dynamo
 import torch._dynamo.test_case
 from torch._dynamo.testing import CompileCounter
 
+
 _variable = 0
 _variable_2 = 0
 
diff --git a/test/dynamo/test_structured_trace.py b/test/dynamo/test_structured_trace.py
index b8883d03ff75..d1577272d797 100644
--- a/test/dynamo/test_structured_trace.py
+++ b/test/dynamo/test_structured_trace.py
@@ -15,14 +15,13 @@ import torch._dynamo.test_case
 import torch._dynamo.testing
 import torch._logging.structured
 import torch.distributed as dist
-
 from torch._inductor.test_case import TestCase
-
 from torch._logging._internal import TorchLogsFormatter
 from torch.nn.parallel import DistributedDataParallel as DDP
 from torch.testing._internal.common_utils import find_free_port
 from torch.testing._internal.inductor_utils import HAS_CUDA
 
+
 requires_cuda = unittest.skipUnless(HAS_CUDA, "requires cuda")
 requires_distributed = functools.partial(
     unittest.skipIf, not dist.is_available(), "requires distributed"
diff --git a/test/dynamo/test_subclasses.py b/test/dynamo/test_subclasses.py
index 2bf39c9220a6..639776266f0f 100644
--- a/test/dynamo/test_subclasses.py
+++ b/test/dynamo/test_subclasses.py
@@ -2,11 +2,9 @@
 import functools
 import itertools
 import unittest
-
 from functools import partial
 
 import torch
-
 import torch._dynamo.test_case
 import torch._dynamo.testing
 import torch._functorch.config
@@ -14,7 +12,6 @@ import torch.utils._pytree as pytree
 import torch.utils.checkpoint
 from torch._dynamo.testing import normalize_gm
 from torch._higher_order_ops.wrap import wrap
-
 from torch.fx.experimental.symbolic_shapes import (
     DimDynamic,
     ShapeEnv,
diff --git a/test/dynamo/test_subgraphs.py b/test/dynamo/test_subgraphs.py
index ec5898d70974..6a0df535e18e 100644
--- a/test/dynamo/test_subgraphs.py
+++ b/test/dynamo/test_subgraphs.py
@@ -2,12 +2,12 @@
 from unittest.mock import patch
 
 import torch
-
 import torch._dynamo.test_case
 import torch._dynamo.testing
 from torch._dynamo.testing import unsupported
 from torch._dynamo.utils import ifdynstaticdefault
 
+
 globalmod = torch.nn.ReLU()
 
 
diff --git a/test/dynamo/test_torchrec.py b/test/dynamo/test_torchrec.py
index 3a625c1083dc..867cba34587d 100644
--- a/test/dynamo/test_torchrec.py
+++ b/test/dynamo/test_torchrec.py
@@ -4,7 +4,6 @@ import unittest
 from typing import Dict, List
 
 import torch
-
 import torch._dynamo.config
 import torch._dynamo.test_case
 from torch import nn
@@ -12,6 +11,7 @@ from torch._dynamo.test_case import TestCase
 from torch._dynamo.testing import CompileCounter
 from torch.testing._internal.common_utils import NoTest
 
+
 try:
     from torchrec.datasets.random import RandomRecDataset
     from torchrec.sparse.jagged_tensor import JaggedTensor, KeyedJaggedTensor
diff --git a/test/dynamo/test_trace_rules.py b/test/dynamo/test_trace_rules.py
index a0585d0b2cd7..79e7ce0bb34d 100644
--- a/test/dynamo/test_trace_rules.py
+++ b/test/dynamo/test_trace_rules.py
@@ -23,6 +23,7 @@ from torch._dynamo.trace_rules import (
 from torch._dynamo.utils import hashable, is_safe_constant, istype
 from torch._dynamo.variables import TorchInGraphFunctionVariable, UserFunctionVariable
 
+
 try:
     from .utils import create_dummy_module_and_function
 except ImportError:
diff --git a/test/dynamo/test_unspec.py b/test/dynamo/test_unspec.py
index 7c3bf60138ae..27caa4e922c3 100644
--- a/test/dynamo/test_unspec.py
+++ b/test/dynamo/test_unspec.py
@@ -9,7 +9,6 @@ import torch
 import torch._dynamo.test_case
 import torch._dynamo.testing
 import torch.nn.functional as F
-
 from torch._dynamo.comptime import comptime
 from torch._dynamo.testing import CompileCounter, same
 from torch.testing._internal.logging_utils import logs_to_string
diff --git a/test/dynamo/test_verify_correctness.py b/test/dynamo/test_verify_correctness.py
index 99f37e1a1235..0a10704d77cb 100644
--- a/test/dynamo/test_verify_correctness.py
+++ b/test/dynamo/test_verify_correctness.py
@@ -2,7 +2,6 @@
 import operator
 
 import torch
-
 import torch._dynamo
 import torch._dynamo.config as config
 import torch._dynamo.test_case
diff --git a/test/dynamo/test_view.py b/test/dynamo/test_view.py
index 2d63e86af162..61b80f7bd8b0 100644
--- a/test/dynamo/test_view.py
+++ b/test/dynamo/test_view.py
@@ -1,6 +1,5 @@
 # Owner(s): ["module: dynamo"]
 import torch
-
 import torch._dynamo
 import torch._dynamo.test_case
 
diff --git a/test/dynamo/utils.py b/test/dynamo/utils.py
index 719eec47d9da..6309ff4c1f96 100644
--- a/test/dynamo/utils.py
+++ b/test/dynamo/utils.py
@@ -7,6 +7,7 @@ import types
 import torch
 import torch._dynamo
 
+
 g_tensor_export = torch.ones(10)
 
 
diff --git a/tools/linter/adapters/ufmt_linter.py b/tools/linter/adapters/ufmt_linter.py
index d92921fd4157..0e62bc3a3e7f 100644
--- a/tools/linter/adapters/ufmt_linter.py
+++ b/tools/linter/adapters/ufmt_linter.py
@@ -40,7 +40,6 @@ ISORT_SKIPLIST = re.compile(
         "test/[a-c]*/**",
         # test/d*/**
         # test/dy*/**
-        "test/dy*/**",
         # test/[e-h]*/**
         # test/i*/**
         # test/j*/**