[BE][Easy][11/19] enforce style for empty lines in import segments in test/dy*/ (#129762)
See https://github.com/pytorch/pytorch/pull/129751#issue-2380881501. Most changes are auto-generated by the linter. You can review these PRs via:

```bash
git diff --ignore-all-space --ignore-blank-lines HEAD~1
```

Pull Request resolved: https://github.com/pytorch/pytorch/pull/129762
Approved by: https://github.com/anijain2305
Committed by: PyTorch MergeBot
Parent: ae9f17a821
Commit: 918ece4f4d
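The hunks below are the import segments touched by this change. As a minimal illustrative sketch (a hypothetical file, not taken verbatim from the diff), the enforced style removes stray blank lines inside a single import segment:

```python
# Before: blank lines scattered through one import segment
import torch

import torch._dynamo.test_case

import torch._dynamo.testing

# After: the linter keeps imports in the same segment contiguous
import torch
import torch._dynamo.test_case
import torch._dynamo.testing
```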
@@ -7,13 +7,11 @@ from importlib import import_module
import torch
import torch._dynamo.config
import torch._dynamo.test_case
import torch._functorch.config
import torch.distributed as dist
import torch.nn as nn
import torch.utils.checkpoint
from functorch.compile import min_cut_rematerialization_partition
from torch._dynamo.backends.common import aot_autograd
from torch._dynamo.testing import CompileCounterWithBackend

@@ -31,6 +29,7 @@ from torch.utils.checkpoint import (
    create_selective_checkpoint_contexts,
)
requires_cuda = unittest.skipUnless(HAS_CUDA, "requires cuda")
requires_distributed = functools.partial(
    unittest.skipIf, not dist.is_available(), "requires distributed"

@@ -8,9 +8,7 @@ import tempfile
import unittest
import torch._dynamo.test_case
from torch._dynamo.repro.after_aot import InputReader, InputWriter, save_graph_repro
from torch.fx.experimental.proxy_tensor import make_fx
from torch.testing._internal.common_utils import IS_FBCODE
from torch.utils._traceback import report_compile_source_on_error

@@ -6,7 +6,6 @@ from textwrap import dedent
from unittest.mock import patch
import torch
import torch._dynamo
import torch._dynamo.test_case
import torch.fx.traceback as fx_traceback

@@ -7,7 +7,6 @@ from unittest.mock import patch
import torch
import torch._dynamo
import torch._dynamo.test_case
import torch._functorch._aot_autograd
from torch._dynamo import config as dynamo_config
from torch._dynamo.utils import counters
@@ -2,16 +2,15 @@
# flake8: noqa: B950
import copy
import math
from dataclasses import dataclass
import torch
import torch._dynamo.test_case
import torch._dynamo.testing
import torch._dynamo.utils
from torch.testing._internal.triton_utils import HAS_CUDA, requires_cuda
if HAS_CUDA:
    import triton

@@ -2,7 +2,6 @@
import unittest
import torch
import torch._dynamo
import torch._dynamo.test_case
from torch._dynamo.backends.debugging import ExplainWithBackend

@@ -12,6 +11,7 @@ from torch._dynamo.testing import same
from torch.fx._lazy_graph_module import _force_skip_lazy_graph_module
from torch.testing._internal.inductor_utils import HAS_CUDA
requires_cuda = unittest.skipUnless(HAS_CUDA, "requires cuda")

@@ -4,7 +4,6 @@
import functools
import torch
import torch._dynamo.test_case
import torch._dynamo.testing
import torch._dynamo.utils

@@ -2,11 +2,11 @@
import unittest.mock
import torch
import torch._dynamo.test_case
import torch._dynamo.testing
from torch._dynamo.testing import same
try:
    from diffusers.models import unet_2d
except ImportError:

@@ -9,6 +9,7 @@ import torch._dynamo.test_case
import torch._dynamo.testing
from torch._dynamo.comptime import comptime
# Because we don't support free variables in comptime at the moment,
# we have to communicate via globals. This also means these tests cannot
# be run in parallel in a single process (not that you'd... ever want

@@ -5,6 +5,7 @@ import torch._dynamo.test_case
import torch._dynamo.testing
from torch._dynamo.utils import disable_cache_limit
# NB: do NOT include this test class in test_dynamic_shapes.py
@@ -2,12 +2,10 @@
import unittest
import torch
import torch._dynamo.test_case
import torch._dynamo.testing
import torch.onnx.operators
from torch._dynamo.testing import EagerAndRecordGraphs, normalize_gm, same
from torch.nn import functional as F
from torch.testing._internal.common_cuda import PLATFORM_SUPPORTS_FLASH_ATTENTION
from torch.testing._internal.common_utils import TEST_WITH_ROCM

@@ -4,7 +4,6 @@ import functools
import unittest
import torch
import torch._dynamo
import torch._dynamo.config
import torch._dynamo.test_case

@@ -6,20 +6,22 @@ import pathlib
import sys
import torch
from torch.testing._internal.common_cuda import IS_JETSON, IS_WINDOWS
from torch.testing._internal.common_utils import run_tests
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from dynamo.test_cudagraphs import TestAotCudagraphs  # noqa: F401
REPO_ROOT = pathlib.Path(__file__).resolve().parent.parent.parent
sys.path.insert(0, str(REPO_ROOT))
from tools.stats.import_test_stats import get_disabled_tests
# Make sure to remove REPO_ROOT after import is done
sys.path.remove(str(REPO_ROOT))

@@ -3,13 +3,13 @@
import unittest
import torch
from functorch import make_fx
from torch._dynamo import debug_utils
from torch._dynamo.debug_utils import aot_graph_input_parser
from torch._dynamo.test_case import TestCase
from torch.testing._internal.inductor_utils import HAS_CUDA
requires_cuda = unittest.skipUnless(HAS_CUDA, "requires cuda")
f32 = torch.float32
@@ -5,7 +5,6 @@ import unittest.mock as mock
from unittest.mock import patch
import torch
import torch._dynamo.test_case
import torch._dynamo.testing
from torch._dynamo.exc import IncorrectUsage

@@ -3,7 +3,6 @@ import unittest
from unittest.mock import Mock
import torch
import torch._dynamo.test_case
import torch._dynamo.testing
from torch._dynamo.device_interface import CudaInterface, DeviceGuard

@@ -7,6 +7,7 @@ from torch._dynamo.testing import make_test_cls_with_patches
from torch.fx.experimental import _config as fx_config
from torch.testing._internal.common_utils import slowTest, TEST_Z3
try:
    from . import (
        test_aot_autograd,

@@ -27,6 +28,7 @@ except ImportError:
    import test_functions
    import test_higher_order_ops
    import test_misc
    import test_modules
    import test_repros
    import test_sdpa
@@ -2,7 +2,6 @@
import torch
import torch._dynamo.config
import torch._dynamo.test_case
import torch._functorch.config
import torch.utils.checkpoint

@@ -17,7 +17,6 @@ import torch
import torch._dynamo
import torch._dynamo.test_case
import torch._dynamo.testing
from functorch.experimental.control_flow import cond
from torch._dynamo import config
from torch._dynamo.exc import UserError

@@ -4,6 +4,7 @@ import torch
import torch._dynamo.test_case
from torch._guards import CompileId
set_eval_frame = torch._C._dynamo.eval_frame.set_eval_frame  # noqa: F401

@@ -16,7 +16,6 @@ from unittest.mock import patch
import numpy as np
import torch
import torch._dynamo.test_case
import torch._dynamo.testing
from torch import sub

@@ -28,7 +27,6 @@ from torch._dynamo.testing import (
from torch._dynamo.utils import ifdynstaticdefault, same
from torch._dynamo.variables import ConstantVariable
from torch._dynamo.variables.lists import RangeVariable
from torch.nn import functional as F
from torch.testing._internal.common_utils import (
    disable_translation_validation_if_dynamic_shapes,

@@ -39,6 +37,7 @@ from torch.testing._internal.common_utils import (
# Defines all the kernels for tests
from torch.testing._internal.triton_utils import *  # noqa: F403
d = torch.ones(10, 10)
e = torch.nn.Linear(10, 10)
flag = True

@@ -2,7 +2,6 @@
from unittest import mock
import torch
import torch._dynamo
import torch._dynamo.test_case
from torch._inductor.utils import pass_execution_and_save
@@ -1,10 +1,10 @@
# Owner(s): ["module: dynamo"]
import torch
import torch._dynamo.test_case
import torch._dynamo.testing
from torch._dynamo.testing import same
try:
    from . import utils
except ImportError:

@@ -9,6 +9,7 @@ from torch._C._dynamo import guards
from torch._dynamo.convert_frame import GlobalStateGuard
from torch.testing._internal.common_utils import set_default_dtype
RootGuardManager = guards.RootGuardManager
DictGuardManager = guards.DictGuardManager
DictSubclassGuardManager = guards.DictSubclassGuardManager

@@ -7,10 +7,8 @@ import unittest
import warnings
import functorch.experimental.control_flow as control_flow
import torch
import torch._dynamo.config as config
import torch._dynamo.test_case
import torch._functorch.config
import torch.nn as nn

@@ -8,7 +8,6 @@ import torch
import torch._dynamo
import torch._dynamo.test_case
import torch._dynamo.testing
from functorch.compile import nop
from torch._dynamo import compiled_autograd
from torch._functorch.aot_autograd import aot_module_simplified

@@ -4,6 +4,7 @@ import unittest
from torch._dynamo import config
from torch._dynamo.testing import make_test_cls_with_patches
try:
    from . import (
        test_aot_autograd,

@@ -11,13 +12,13 @@ try:
        test_higher_order_ops,
        test_misc,
        test_modules,
        # test_repros,
    )
except ImportError:
    import test_aot_autograd
    import test_functions
    import test_higher_order_ops
    import test_misc
    import test_modules
@@ -1,6 +1,5 @@
# Owner(s): ["module: dynamo"]
import torch
import torch._dynamo.test_case
import torch._dynamo.testing
import torch.onnx.operators

@@ -10,9 +10,7 @@ import torch._dynamo.test_case
import torch._dynamo.testing
import torch.distributed as dist
from torch._dynamo.testing import skipIfNotPy311
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.testing._internal.common_utils import (
    find_free_port,
    munge_exc,

@@ -25,6 +23,7 @@ from torch.testing._internal.logging_utils import (
    make_settings_test,
)
requires_cuda = unittest.skipUnless(HAS_CUDA, "requires cuda")
requires_distributed = functools.partial(
    unittest.skipIf, not dist.is_available(), "requires distributed"

@@ -5,6 +5,7 @@ import torch._dynamo
from torch._dynamo.test_minifier_common import MinifierTestBase
from torch.testing._internal.common_utils import skipIfNNModuleInlined
requires_cuda = unittest.skipUnless(torch.cuda.is_available(), "requires cuda")

@@ -28,10 +28,8 @@ import numpy as np
import torch
import torch._dynamo.testing
import torch._inductor.test_case
import torch.onnx.operators
import torch.utils._pytree as pytree
import torch.utils.cpp_extension
from torch import Tensor

@@ -85,6 +83,7 @@ from torch.testing._internal.common_utils import (
from torch.testing._internal.jit_utils import JitTestCase
from torch.testing._internal.logging_utils import logs_to_string
mytuple = collections.namedtuple("mytuple", ["a", "b", "ab"])
T = typing.TypeVar("T")
@@ -3,11 +3,11 @@ import dataclasses
import unittest.mock
import torch
import torch._dynamo.test_case
import torch._dynamo.testing
from torch._dynamo.testing import same
try:
    from transformers import modeling_outputs
    from transformers.configuration_utils import PretrainedConfig

@@ -14,7 +14,6 @@ from typing import Dict, NamedTuple, Tuple
from unittest.mock import patch
import torch
import torch._dynamo.test_case
import torch._dynamo.testing
import torch.nn.functional as F

@@ -25,6 +24,7 @@ from torch._dynamo.testing import expectedFailureDynamic, same
from torch.nn.modules.lazy import LazyModuleMixin
from torch.nn.parameter import Parameter, UninitializedParameter
try:
    from . import test_functions
except ImportError:

@@ -1,11 +1,11 @@
# Owner(s): ["module: dynamo"]
import torch
import torch._dynamo.test_case
import torch._dynamo.testing
from torch._dynamo import eval_frame
from torch._dynamo.hooks import Hooks
c = 10

@@ -1,14 +1,11 @@
# Owner(s): ["module: dynamo"]
"""
PYTEST_DONT_REWRITE (prevents pytest from rewriting assertions, which interferes
with test_adam in OptimizerTests)
"""
import functools
# Owner(s): ["module: dynamo"]
import torch
import torch._dynamo
import torch._dynamo.test_case
import torch._dynamo.testing
@@ -1,6 +1,5 @@
# Owner(s): ["module: dynamo"]
import torch
import torch._dynamo
import torch._dynamo.test_case

@@ -2,13 +2,10 @@
from unittest.mock import patch
import torch
import torch._dynamo.test_case
import torch._dynamo.testing
import torch._dynamo.utils
from torch._dynamo.utils import dynamo_timed
from torch.testing._internal.common_utils import TemporaryFileName

@@ -2,11 +2,11 @@
from typing import Callable, Dict, List, NamedTuple, Optional
import torch
import torch._dynamo
from torch._dynamo.test_case import run_tests, TestCase
from torch._dynamo.testing import CompileCounter, same
"""
This is an example of a pure-python version of autograd implemented by
@zdevito. It represents a rather challenging test case for TorchDynamo

@@ -3,12 +3,10 @@ import unittest
import weakref
import torch
import torch._dynamo
import torch._dynamo.config
import torch._dynamo.test_case
import torch._dynamo.testing
import torch._logging
from torch.testing._internal.logging_utils import kwargs_to_settings, log_settings

@@ -2,7 +2,6 @@
from unittest.mock import patch
import torch
import torch._dynamo.test_case
import torch._dynamo.testing

@@ -27,11 +27,9 @@ from unittest import mock
import numpy as np
import torch
import torch._dynamo.test_case
import torch._dynamo.testing
import torch._dynamo.utils
import torch._functorch.config
import torch.library
import torch.utils._pytree as pytree
@@ -40,7 +38,6 @@ from torch._dynamo.debug_utils import same_two_models
from torch._dynamo.testing import CompileCounter, rand_strided, same
from torch._inductor.utils import fresh_inductor_cache
from torch.nn import functional as F
from torch.testing._internal.common_cuda import PLATFORM_SUPPORTS_FLASH_ATTENTION
from torch.testing._internal.common_utils import (
    disable_translation_validation_if_dynamic_shapes,

@@ -2,11 +2,11 @@
from unittest.mock import patch
import torch
import torch._dynamo
import torch._dynamo.test_case
from torch._dynamo.testing import CompileCounter
_variable = 0
_variable_2 = 0

@@ -15,14 +15,13 @@ import torch._dynamo.test_case
import torch._dynamo.testing
import torch._logging.structured
import torch.distributed as dist
from torch._inductor.test_case import TestCase
from torch._logging._internal import TorchLogsFormatter
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.testing._internal.common_utils import find_free_port
from torch.testing._internal.inductor_utils import HAS_CUDA
requires_cuda = unittest.skipUnless(HAS_CUDA, "requires cuda")
requires_distributed = functools.partial(
    unittest.skipIf, not dist.is_available(), "requires distributed"

@@ -2,11 +2,9 @@
import functools
import itertools
import unittest
from functools import partial
import torch
import torch._dynamo.test_case
import torch._dynamo.testing
import torch._functorch.config

@@ -14,7 +12,6 @@ import torch.utils._pytree as pytree
import torch.utils.checkpoint
from torch._dynamo.testing import normalize_gm
from torch._higher_order_ops.wrap import wrap
from torch.fx.experimental.symbolic_shapes import (
    DimDynamic,
    ShapeEnv,
@@ -2,12 +2,12 @@
from unittest.mock import patch
import torch
import torch._dynamo.test_case
import torch._dynamo.testing
from torch._dynamo.testing import unsupported
from torch._dynamo.utils import ifdynstaticdefault
globalmod = torch.nn.ReLU()

@@ -4,7 +4,6 @@ import unittest
from typing import Dict, List
import torch
import torch._dynamo.config
import torch._dynamo.test_case
from torch import nn

@@ -12,6 +11,7 @@ from torch._dynamo.test_case import TestCase
from torch._dynamo.testing import CompileCounter
from torch.testing._internal.common_utils import NoTest
try:
    from torchrec.datasets.random import RandomRecDataset
    from torchrec.sparse.jagged_tensor import JaggedTensor, KeyedJaggedTensor

@@ -23,6 +23,7 @@ from torch._dynamo.trace_rules import (
from torch._dynamo.utils import hashable, is_safe_constant, istype
from torch._dynamo.variables import TorchInGraphFunctionVariable, UserFunctionVariable
try:
    from .utils import create_dummy_module_and_function
except ImportError:

@@ -9,7 +9,6 @@ import torch
import torch._dynamo.test_case
import torch._dynamo.testing
import torch.nn.functional as F
from torch._dynamo.comptime import comptime
from torch._dynamo.testing import CompileCounter, same
from torch.testing._internal.logging_utils import logs_to_string
@@ -2,7 +2,6 @@
import operator
import torch
import torch._dynamo
import torch._dynamo.config as config
import torch._dynamo.test_case

@@ -1,6 +1,5 @@
# Owner(s): ["module: dynamo"]
import torch
import torch._dynamo
import torch._dynamo.test_case

@@ -7,6 +7,7 @@ import types
import torch
import torch._dynamo
g_tensor_export = torch.ones(10)

@@ -40,7 +40,6 @@ ISORT_SKIPLIST = re.compile(
        "test/[a-c]*/**",
        # test/d*/**
        # test/dy*/**
        "test/dy*/**",
        # test/[e-h]*/**
        # test/i*/**
        # test/j*/**