[BE][Easy][11/19] enforce style for empty lines in import segments in test/dy*/ (#129762)

See https://github.com/pytorch/pytorch/pull/129751#issue-2380881501. Most changes are auto-generated by the linter.

You can review these PRs via:

```bash
git diff --ignore-all-space --ignore-blank-lines HEAD~1
```
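
For context, the enforced style (inferred from the hunks below; the exact grouping is whatever the import linter emits) removes stray blank lines inside an import segment and keeps a blank line between the imports and the first module-level statement. A rough before/after sketch, reusing module names from the touched test files:

```python
# Illustrative sketch of the blank-line style assumed from this commit's hunks;
# it is hand-written for illustration, not produced by the linter itself.

# Before: blank lines scattered inside the import segment, and module-level
# code starting directly after the last import.
import torch

import torch._dynamo.test_case

from torch._dynamo.testing import same
requires_cuda = None  # module-level code begins immediately

# After: one contiguous import segment, then a blank line before the code.
import torch
import torch._dynamo.test_case
from torch._dynamo.testing import same

requires_cuda = None
```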

Pull Request resolved: https://github.com/pytorch/pytorch/pull/129762
Approved by: https://github.com/anijain2305
Author: Xuehai Pan
Date: 2024-07-27 23:04:46 +08:00
Committed by: PyTorch MergeBot
Parent: ae9f17a821
Commit: 918ece4f4d
52 changed files with 32 additions and 63 deletions


@@ -7,13 +7,11 @@ from importlib import import_module
import torch
import torch._dynamo.config
import torch._dynamo.test_case
import torch._functorch.config
import torch.distributed as dist
import torch.nn as nn
import torch.utils.checkpoint
from functorch.compile import min_cut_rematerialization_partition
from torch._dynamo.backends.common import aot_autograd
from torch._dynamo.testing import CompileCounterWithBackend
@@ -31,6 +29,7 @@ from torch.utils.checkpoint import (
    create_selective_checkpoint_contexts,
)
requires_cuda = unittest.skipUnless(HAS_CUDA, "requires cuda")
requires_distributed = functools.partial(
    unittest.skipIf, not dist.is_available(), "requires distributed"


@@ -8,9 +8,7 @@ import tempfile
import unittest
import torch._dynamo.test_case
from torch._dynamo.repro.after_aot import InputReader, InputWriter, save_graph_repro
from torch.fx.experimental.proxy_tensor import make_fx
from torch.testing._internal.common_utils import IS_FBCODE
from torch.utils._traceback import report_compile_source_on_error


@@ -6,7 +6,6 @@ from textwrap import dedent
from unittest.mock import patch
import torch
import torch._dynamo
import torch._dynamo.test_case
import torch.fx.traceback as fx_traceback


@@ -7,7 +7,6 @@ from unittest.mock import patch
import torch
import torch._dynamo
import torch._dynamo.test_case
import torch._functorch._aot_autograd
from torch._dynamo import config as dynamo_config
from torch._dynamo.utils import counters


@@ -2,16 +2,15 @@
# flake8: noqa: B950
import copy
import math
from dataclasses import dataclass
import torch
import torch._dynamo.test_case
import torch._dynamo.testing
import torch._dynamo.utils
from torch.testing._internal.triton_utils import HAS_CUDA, requires_cuda
if HAS_CUDA:
    import triton


@@ -2,7 +2,6 @@
import unittest
import torch
import torch._dynamo
import torch._dynamo.test_case
from torch._dynamo.backends.debugging import ExplainWithBackend
@@ -12,6 +11,7 @@ from torch._dynamo.testing import same
from torch.fx._lazy_graph_module import _force_skip_lazy_graph_module
from torch.testing._internal.inductor_utils import HAS_CUDA
requires_cuda = unittest.skipUnless(HAS_CUDA, "requires cuda")


@@ -4,7 +4,6 @@
import functools
import torch
import torch._dynamo.test_case
import torch._dynamo.testing
import torch._dynamo.utils


@@ -2,11 +2,11 @@
import unittest.mock
import torch
import torch._dynamo.test_case
import torch._dynamo.testing
from torch._dynamo.testing import same
try:
    from diffusers.models import unet_2d
except ImportError:


@@ -9,6 +9,7 @@ import torch._dynamo.test_case
import torch._dynamo.testing
from torch._dynamo.comptime import comptime
# Because we don't support free variables in comptime at the moment,
# we have to communicate via globals. This also means these tests cannot
# be run in parallel in a single process (not that you'd... ever want


@@ -5,6 +5,7 @@ import torch._dynamo.test_case
import torch._dynamo.testing
from torch._dynamo.utils import disable_cache_limit
# NB: do NOT include this test class in test_dynamic_shapes.py


@@ -2,12 +2,10 @@
import unittest
import torch
import torch._dynamo.test_case
import torch._dynamo.testing
import torch.onnx.operators
from torch._dynamo.testing import EagerAndRecordGraphs, normalize_gm, same
from torch.nn import functional as F
from torch.testing._internal.common_cuda import PLATFORM_SUPPORTS_FLASH_ATTENTION
from torch.testing._internal.common_utils import TEST_WITH_ROCM


@@ -4,7 +4,6 @@ import functools
import unittest
import torch
import torch._dynamo
import torch._dynamo.config
import torch._dynamo.test_case


@@ -6,20 +6,22 @@ import pathlib
import sys
import torch
from torch.testing._internal.common_cuda import IS_JETSON, IS_WINDOWS
from torch.testing._internal.common_utils import run_tests
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from dynamo.test_cudagraphs import TestAotCudagraphs  # noqa: F401
REPO_ROOT = pathlib.Path(__file__).resolve().parent.parent.parent
sys.path.insert(0, str(REPO_ROOT))
from tools.stats.import_test_stats import get_disabled_tests
# Make sure to remove REPO_ROOT after import is done
sys.path.remove(str(REPO_ROOT))


@@ -3,13 +3,13 @@
import unittest
import torch
from functorch import make_fx
from torch._dynamo import debug_utils
from torch._dynamo.debug_utils import aot_graph_input_parser
from torch._dynamo.test_case import TestCase
from torch.testing._internal.inductor_utils import HAS_CUDA
requires_cuda = unittest.skipUnless(HAS_CUDA, "requires cuda")
f32 = torch.float32


@@ -5,7 +5,6 @@ import unittest.mock as mock
from unittest.mock import patch
import torch
import torch._dynamo.test_case
import torch._dynamo.testing
from torch._dynamo.exc import IncorrectUsage


@@ -3,7 +3,6 @@ import unittest
from unittest.mock import Mock
import torch
import torch._dynamo.test_case
import torch._dynamo.testing
from torch._dynamo.device_interface import CudaInterface, DeviceGuard


@@ -7,6 +7,7 @@ from torch._dynamo.testing import make_test_cls_with_patches
from torch.fx.experimental import _config as fx_config
from torch.testing._internal.common_utils import slowTest, TEST_Z3
try:
    from . import (
        test_aot_autograd,
@@ -27,6 +28,7 @@ except ImportError:
    import test_functions
    import test_higher_order_ops
    import test_misc
    import test_modules
    import test_repros
    import test_sdpa


@@ -2,7 +2,6 @@
import torch
import torch._dynamo.config
import torch._dynamo.test_case
import torch._functorch.config
import torch.utils.checkpoint


@@ -17,7 +17,6 @@ import torch
import torch._dynamo
import torch._dynamo.test_case
import torch._dynamo.testing
from functorch.experimental.control_flow import cond
from torch._dynamo import config
from torch._dynamo.exc import UserError


@@ -4,6 +4,7 @@ import torch
import torch._dynamo.test_case
from torch._guards import CompileId
set_eval_frame = torch._C._dynamo.eval_frame.set_eval_frame  # noqa: F401


@@ -16,7 +16,6 @@ from unittest.mock import patch
import numpy as np
import torch
import torch._dynamo.test_case
import torch._dynamo.testing
from torch import sub
@@ -28,7 +27,6 @@ from torch._dynamo.testing import (
from torch._dynamo.utils import ifdynstaticdefault, same
from torch._dynamo.variables import ConstantVariable
from torch._dynamo.variables.lists import RangeVariable
from torch.nn import functional as F
from torch.testing._internal.common_utils import (
    disable_translation_validation_if_dynamic_shapes,
@@ -39,6 +37,7 @@ from torch.testing._internal.common_utils import (
# Defines all the kernels for tests
from torch.testing._internal.triton_utils import *  # noqa: F403
d = torch.ones(10, 10)
e = torch.nn.Linear(10, 10)
flag = True


@@ -2,7 +2,6 @@
from unittest import mock
import torch
import torch._dynamo
import torch._dynamo.test_case
from torch._inductor.utils import pass_execution_and_save


@@ -1,10 +1,10 @@
# Owner(s): ["module: dynamo"]
import torch
import torch._dynamo.test_case
import torch._dynamo.testing
from torch._dynamo.testing import same
try:
    from . import utils
except ImportError:


@@ -9,6 +9,7 @@ from torch._C._dynamo import guards
from torch._dynamo.convert_frame import GlobalStateGuard
from torch.testing._internal.common_utils import set_default_dtype
RootGuardManager = guards.RootGuardManager
DictGuardManager = guards.DictGuardManager
DictSubclassGuardManager = guards.DictSubclassGuardManager


@@ -7,10 +7,8 @@ import unittest
import warnings
import functorch.experimental.control_flow as control_flow
import torch
import torch._dynamo.config as config
import torch._dynamo.test_case
import torch._functorch.config
import torch.nn as nn


@@ -8,7 +8,6 @@ import torch
import torch._dynamo
import torch._dynamo.test_case
import torch._dynamo.testing
from functorch.compile import nop
from torch._dynamo import compiled_autograd
from torch._functorch.aot_autograd import aot_module_simplified


@@ -4,6 +4,7 @@ import unittest
from torch._dynamo import config
from torch._dynamo.testing import make_test_cls_with_patches
try:
    from . import (
        test_aot_autograd,
@@ -11,13 +12,13 @@ try:
        test_higher_order_ops,
        test_misc,
        test_modules,
        # test_repros,
    )
except ImportError:
    import test_aot_autograd
    import test_functions
    import test_higher_order_ops
    import test_misc
    import test_modules


@@ -1,6 +1,5 @@
# Owner(s): ["module: dynamo"]
import torch
import torch._dynamo.test_case
import torch._dynamo.testing
import torch.onnx.operators


@@ -10,9 +10,7 @@ import torch._dynamo.test_case
import torch._dynamo.testing
import torch.distributed as dist
from torch._dynamo.testing import skipIfNotPy311
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.testing._internal.common_utils import (
    find_free_port,
    munge_exc,
@@ -25,6 +23,7 @@ from torch.testing._internal.logging_utils import (
    make_settings_test,
)
requires_cuda = unittest.skipUnless(HAS_CUDA, "requires cuda")
requires_distributed = functools.partial(
    unittest.skipIf, not dist.is_available(), "requires distributed"


@@ -5,6 +5,7 @@ import torch._dynamo
from torch._dynamo.test_minifier_common import MinifierTestBase
from torch.testing._internal.common_utils import skipIfNNModuleInlined
requires_cuda = unittest.skipUnless(torch.cuda.is_available(), "requires cuda")


@@ -28,10 +28,8 @@ import numpy as np
import torch
import torch._dynamo.testing
import torch._inductor.test_case
import torch.onnx.operators
import torch.utils._pytree as pytree
import torch.utils.cpp_extension
from torch import Tensor
@@ -85,6 +83,7 @@ from torch.testing._internal.common_utils import (
from torch.testing._internal.jit_utils import JitTestCase
from torch.testing._internal.logging_utils import logs_to_string
mytuple = collections.namedtuple("mytuple", ["a", "b", "ab"])
T = typing.TypeVar("T")


@@ -3,11 +3,11 @@ import dataclasses
import unittest.mock
import torch
import torch._dynamo.test_case
import torch._dynamo.testing
from torch._dynamo.testing import same
try:
    from transformers import modeling_outputs
    from transformers.configuration_utils import PretrainedConfig


@@ -14,7 +14,6 @@ from typing import Dict, NamedTuple, Tuple
from unittest.mock import patch
import torch
import torch._dynamo.test_case
import torch._dynamo.testing
import torch.nn.functional as F
@@ -25,6 +24,7 @@ from torch._dynamo.testing import expectedFailureDynamic, same
from torch.nn.modules.lazy import LazyModuleMixin
from torch.nn.parameter import Parameter, UninitializedParameter
try:
    from . import test_functions
except ImportError:


@@ -1,11 +1,11 @@
# Owner(s): ["module: dynamo"]
import torch
import torch._dynamo.test_case
import torch._dynamo.testing
from torch._dynamo import eval_frame
from torch._dynamo.hooks import Hooks
c = 10


@@ -1,14 +1,11 @@
# Owner(s): ["module: dynamo"]
"""
PYTEST_DONT_REWRITE (prevents pytest from rewriting assertions, which interferes
with test_adam in OptimizerTests)
"""
import functools
# Owner(s): ["module: dynamo"]
import torch
import torch._dynamo
import torch._dynamo.test_case
import torch._dynamo.testing


@@ -1,6 +1,5 @@
# Owner(s): ["module: dynamo"]
import torch
import torch._dynamo
import torch._dynamo.test_case


@@ -2,13 +2,10 @@
from unittest.mock import patch
import torch
import torch._dynamo.test_case
import torch._dynamo.testing
import torch._dynamo.utils
from torch._dynamo.utils import dynamo_timed
from torch.testing._internal.common_utils import TemporaryFileName


@@ -2,11 +2,11 @@
from typing import Callable, Dict, List, NamedTuple, Optional
import torch
import torch._dynamo
from torch._dynamo.test_case import run_tests, TestCase
from torch._dynamo.testing import CompileCounter, same
"""
This is an example of a pure-python version of autograd implemented by
@zdevito. It represents a rather challenging test case for TorchDynamo


@@ -3,12 +3,10 @@ import unittest
import weakref
import torch
import torch._dynamo
import torch._dynamo.config
import torch._dynamo.test_case
import torch._dynamo.testing
import torch._logging
from torch.testing._internal.logging_utils import kwargs_to_settings, log_settings


@@ -2,7 +2,6 @@
from unittest.mock import patch
import torch
import torch._dynamo.test_case
import torch._dynamo.testing


@@ -27,11 +27,9 @@ from unittest import mock
import numpy as np
import torch
import torch._dynamo.test_case
import torch._dynamo.testing
import torch._dynamo.utils
import torch._functorch.config
import torch.library
import torch.utils._pytree as pytree
@@ -40,7 +38,6 @@ from torch._dynamo.debug_utils import same_two_models
from torch._dynamo.testing import CompileCounter, rand_strided, same
from torch._inductor.utils import fresh_inductor_cache
from torch.nn import functional as F
from torch.testing._internal.common_cuda import PLATFORM_SUPPORTS_FLASH_ATTENTION
from torch.testing._internal.common_utils import (
    disable_translation_validation_if_dynamic_shapes,


@@ -2,11 +2,11 @@
from unittest.mock import patch
import torch
import torch._dynamo
import torch._dynamo.test_case
from torch._dynamo.testing import CompileCounter
_variable = 0
_variable_2 = 0


@@ -15,14 +15,13 @@ import torch._dynamo.test_case
import torch._dynamo.testing
import torch._logging.structured
import torch.distributed as dist
from torch._inductor.test_case import TestCase
from torch._logging._internal import TorchLogsFormatter
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.testing._internal.common_utils import find_free_port
from torch.testing._internal.inductor_utils import HAS_CUDA
requires_cuda = unittest.skipUnless(HAS_CUDA, "requires cuda")
requires_distributed = functools.partial(
    unittest.skipIf, not dist.is_available(), "requires distributed"


@@ -2,11 +2,9 @@
import functools
import itertools
import unittest
from functools import partial
import torch
import torch._dynamo.test_case
import torch._dynamo.testing
import torch._functorch.config
@@ -14,7 +12,6 @@ import torch.utils._pytree as pytree
import torch.utils.checkpoint
from torch._dynamo.testing import normalize_gm
from torch._higher_order_ops.wrap import wrap
from torch.fx.experimental.symbolic_shapes import (
    DimDynamic,
    ShapeEnv,


@@ -2,12 +2,12 @@
from unittest.mock import patch
import torch
import torch._dynamo.test_case
import torch._dynamo.testing
from torch._dynamo.testing import unsupported
from torch._dynamo.utils import ifdynstaticdefault
globalmod = torch.nn.ReLU()


@@ -4,7 +4,6 @@ import unittest
from typing import Dict, List
import torch
import torch._dynamo.config
import torch._dynamo.test_case
from torch import nn
@@ -12,6 +11,7 @@ from torch._dynamo.test_case import TestCase
from torch._dynamo.testing import CompileCounter
from torch.testing._internal.common_utils import NoTest
try:
    from torchrec.datasets.random import RandomRecDataset
    from torchrec.sparse.jagged_tensor import JaggedTensor, KeyedJaggedTensor


@@ -23,6 +23,7 @@ from torch._dynamo.trace_rules import (
from torch._dynamo.utils import hashable, is_safe_constant, istype
from torch._dynamo.variables import TorchInGraphFunctionVariable, UserFunctionVariable
try:
    from .utils import create_dummy_module_and_function
except ImportError:


@@ -9,7 +9,6 @@ import torch
import torch._dynamo.test_case
import torch._dynamo.testing
import torch.nn.functional as F
from torch._dynamo.comptime import comptime
from torch._dynamo.testing import CompileCounter, same
from torch.testing._internal.logging_utils import logs_to_string


@@ -2,7 +2,6 @@
import operator
import torch
import torch._dynamo
import torch._dynamo.config as config
import torch._dynamo.test_case


@@ -1,6 +1,5 @@
# Owner(s): ["module: dynamo"]
import torch
import torch._dynamo
import torch._dynamo.test_case


@@ -7,6 +7,7 @@ import types
import torch
import torch._dynamo
g_tensor_export = torch.ones(10)


@@ -40,7 +40,6 @@ ISORT_SKIPLIST = re.compile(
    "test/[a-c]*/**",
    # test/d*/**
    # test/dy*/**
    "test/dy*/**",
    # test/[e-h]*/**
    # test/i*/**
    # test/j*/**
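
The final hunk drops `"test/dy*/**"` from `ISORT_SKIPLIST`, so the import linter now checks files under `test/dy*/` instead of skipping them. Below is a hypothetical sketch of how a glob-based skiplist can gate such a check; it is illustrative only and not the actual adapter code in `tools/linter/`:

```python
# Hypothetical sketch of a glob-based skiplist; not the real linter adapter.
import fnmatch
import re

SKIP_GLOBS = [
    "test/[a-c]*/**",
    # "test/dy*/**",  # removed by this commit, so test/dy*/ files are now checked
]
ISORT_SKIPLIST = re.compile("|".join(fnmatch.translate(glob) for glob in SKIP_GLOBS))


def should_check(path: str) -> bool:
    """Return True when the import-style check should run on this file."""
    return ISORT_SKIPLIST.fullmatch(path) is None


print(should_check("test/dynamo/test_misc.py"))  # True: no longer on the skiplist
print(should_check("test/ao/test_quantize.py"))  # False: still skipped by the glob
```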