[BE][Easy][8/19] enforce style for empty lines in import segments in test/[k-p]*/ (#129759)

See https://github.com/pytorch/pytorch/pull/129751#issue-2380881501. Most changes are auto-generated by the linter.
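
The style being enforced is a single empty line separating the import segment at the top of a file from the first module-level statement. A minimal sketch of the corrected form, mirroring the first lazy-tensor test hunk below (surrounding file contents are omitted, so treat this as illustrative rather than exact):

```python
import torch._lazy.ts_backend
import torch.nn as nn
from torch.testing._internal.common_utils import IS_WINDOWS, run_tests, TestCase

# The empty line above is what the linter adds: module-level code no longer
# starts on the line immediately following the import segment.
torch._lazy.ts_backend.init()
```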

You can review these PRs via:

```bash
git diff --ignore-all-space --ignore-blank-lines HEAD~1
```

Pull Request resolved: https://github.com/pytorch/pytorch/pull/129759
Approved by: https://github.com/justinchuby, https://github.com/ezyang
Author: Xuehai Pan
Date: 2024-07-26 14:30:32 +08:00
Committed by: PyTorch MergeBot
Parent commit: 914577569d
Commit: fbe6f42dcf
78 changed files with 69 additions and 41 deletions

@@ -10,6 +10,7 @@ import torch._lazy.ts_backend
import torch.nn as nn
from torch.testing._internal.common_utils import IS_WINDOWS, run_tests, TestCase
torch._lazy.ts_backend.init()

@@ -4,6 +4,7 @@ import unittest
from torch._lazy.ts_backend import init as init_ts_backend
init_ts_backend()
import copy
import dis

@@ -7,6 +7,7 @@ import torch._lazy.metrics as metrics
import torch._lazy.ts_backend
from torch.testing._internal.common_utils import run_tests, TestCase
torch._lazy.ts_backend.init()
NODE_TYPE_PATTERN = re.compile(r", NodeType=[^\n]+")

@@ -5,6 +5,7 @@ import torch._lazy.metrics as metrics
import torch._lazy.ts_backend
from torch.testing._internal.common_utils import run_tests, skipIfTorchDynamo, TestCase
torch._lazy.ts_backend.init()

@@ -4,9 +4,9 @@ import torch
import torch._lazy
import torch._lazy.ts_backend
from torch import float16, float32
from torch.testing._internal.common_utils import TestCase
torch._lazy.ts_backend.init()

@@ -11,6 +11,7 @@ import torch._lazy.metrics as metrics
import torch._lazy.ts_backend
from torch.testing._internal.common_utils import IS_WINDOWS, run_tests, TestCase
torch._lazy.ts_backend.init()
torch._lazy.config.set_reuse_ir(True)

@@ -7,6 +7,7 @@ import torch._lazy
import torch._lazy.ts_backend
from torch.testing._internal.common_utils import run_tests, TestCase
torch._lazy.ts_backend.init()

@@ -9,6 +9,7 @@ from torchvision import models
import torch
# Download and trace the model.
model = models.mobilenet_v2(weights=models.MobileNet_V2_Weights.IMAGENET1K_V1)
model.eval()

@@ -1,13 +1,13 @@
import functools
import os
import shutil
import sys
from io import BytesIO
import torch
from torch.jit.mobile import _export_operator_list, _load_for_lite_interpreter
_OPERATORS = set()
_FILENAMES = []
_MODELS = []

@@ -29,12 +29,7 @@ from nn_ops import (
NNUtilsModule,
NNVisionModule,
)
from quantization_ops import (
FusedQuantModule,
GeneralQuantModule,
# DynamicQuantModule,
StaticQuantModule,
)
from quantization_ops import FusedQuantModule, GeneralQuantModule, StaticQuantModule
from sampling_ops import SamplingOpsModule
from tensor_ops import (
TensorCreationOpsModule,
@@ -52,6 +47,7 @@ from torchvision_models import (
import torch
from torch.jit.mobile import _load_for_lite_interpreter
test_path_ios = "ios/TestApp/models/"
test_path_android = "android/pytorch_android/src/androidTest/assets/"

@@ -7,6 +7,7 @@ import sys
import yaml
root_operators = {}
traced_operators = {}
kernel_metadata = {}

@@ -20,6 +20,7 @@ from torch.jit.mobile import (
)
from torch.testing._internal.common_utils import run_tests, TestCase
pytorch_test_dir = Path(__file__).resolve().parents[1]
# script_module_v4.ptl and script_module_v5.ptl source code

@@ -7,7 +7,6 @@ from typing import Dict, List
import torch
import torch.utils.bundled_inputs
from torch.jit.mobile import _export_operator_list, _load_for_lite_interpreter
from torch.testing import FileCheck
from torch.testing._internal.common_quantization import (

@@ -7,7 +7,6 @@ from typing import Dict, List, NamedTuple
import torch
import torch.utils.bundled_inputs
from torch.jit.mobile import _load_for_lite_interpreter
from torch.testing._internal.common_utils import run_tests, TestCase

@@ -6,9 +6,9 @@ from pathlib import Path
from torch.jit.generate_bytecode import generate_upgraders_bytecode
from torch.testing._internal.common_utils import run_tests, TestCase
from torchgen.operator_versions.gen_mobile_upgraders import sort_upgrader, write_cpp
pytorch_caffe2_dir = Path(__file__).resolve().parents[2]

@@ -6,10 +6,10 @@ from pathlib import Path
import torch
import torch.utils.bundled_inputs
from torch.jit.mobile import _load_for_lite_interpreter
from torch.testing._internal.common_utils import run_tests, TestCase
pytorch_test_dir = Path(__file__).resolve().parents[1]

@@ -6,12 +6,10 @@ import warnings
from itertools import product
import torch
import torch.autograd.forward_ad as fwAD
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
from torch.testing import make_tensor
from torch.testing._internal.common_cuda import (
TEST_CUDA,
@@ -62,6 +60,7 @@ from torch.testing._internal.common_utils import (
TEST_WITH_ROCM,
)
AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32()

@@ -19,7 +19,6 @@ from torch.testing._internal.common_device_type import (
skipMeta,
TEST_WITH_ROCM,
)
from torch.testing._internal.common_nn import NNTestCase
from torch.testing._internal.common_utils import (
_assertGradAndGradgradChecks,

@@ -9,7 +9,6 @@ from operator import mul
import torch
import torch.nn.functional as F
import torch.nn.init as init
from torch.testing._internal.common_utils import (
run_tests,
skipIfNoLapack,
@@ -19,6 +18,7 @@ from torch.testing._internal.common_utils import (
TestCase,
)
if TEST_SCIPY:
from scipy import stats

@@ -19,6 +19,7 @@ from torch.testing._internal.common_utils import (
)
from torch.utils._pytree import tree_map
if TEST_NUMPY:
import numpy as np

@@ -7,7 +7,6 @@ import warnings
import weakref
from collections import namedtuple, OrderedDict
from copy import deepcopy
from functools import partial
from tempfile import NamedTemporaryFile
from typing import Any, Dict, List, Tuple

@@ -6,7 +6,6 @@ import unittest.mock as mock
import torch
import torch.nn as nn
from torch.nn import MultiheadAttention
from torch.testing._internal.common_device_type import (
dtypes,
@@ -22,6 +21,7 @@ from torch.testing._internal.common_utils import (
TEST_WITH_CROSSREF,
)
if TEST_NUMPY:
import numpy as np

@@ -4,7 +4,6 @@ from copy import deepcopy
from itertools import product
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init

@@ -13,7 +13,6 @@ from itertools import repeat
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import inf, nan
from torch.autograd import gradcheck, gradgradcheck
from torch.testing import make_tensor

@@ -4,7 +4,6 @@ import unittest
import unittest.mock as mock
import torch
import torch.nn as nn
import torch.nn.utils.prune as prune
from torch.testing._internal.common_nn import NNTestCase

@@ -20,10 +20,10 @@ from torch.onnx import (
_OrtBackendOptions as OrtBackendOptions,
ExportOptions,
)
from torch.testing._internal import common_utils
from torch.testing._internal.common_utils import skipIfNNModuleInlined
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import onnx_test_common

@@ -13,7 +13,6 @@ from torch.onnx._internal.exporter import (
ProtobufONNXProgramSerializer,
ResolvedExportOptions,
)
from torch.testing._internal import common_utils

@@ -21,6 +21,7 @@ from torch.onnx._internal.fx import (
)
from torch.testing._internal import common_utils
# TODO: this can only be global. https://github.com/microsoft/onnxscript/issues/805
TCustomFloat = TypeVar("TCustomFloat", bound=Union[FLOAT16, FLOAT, DOUBLE, BFLOAT16])

@@ -17,6 +17,7 @@ import onnxscript
import torch
_MISMATCH_MARKDOWN_TEMPLATE = """\
### Summary

@@ -16,6 +16,7 @@ from torch.onnx._internal.diagnostics.infra import formatter, sarif
from torch.onnx._internal.fx import diagnostics as fx_diagnostics
from torch.testing._internal import common_utils, logging_utils
if typing.TYPE_CHECKING:
import unittest

@@ -1,6 +1,7 @@
import torch
import torch.nn as nn
# configurable
bsz = 64
imgsz = 64

@@ -3,7 +3,6 @@
from __future__ import annotations
import contextlib
import copy
import dataclasses
import io
@@ -26,7 +25,6 @@ from typing import (
)
import numpy as np
import onnxruntime
import pytest
import pytorch_test_common
@@ -39,6 +37,7 @@ from torch.testing._internal import common_utils
from torch.testing._internal.opinfo import core as opinfo_core
from torch.types import Number
_NumericType = Union[Number, torch.Tensor, np.ndarray]
_ModelType = Union[torch.nn.Module, Callable, torch_export.ExportedProgram]
_InputArgsType = Optional[

@@ -18,6 +18,7 @@ from torch.autograd import function
from torch.onnx._internal import diagnostics
from torch.testing._internal import common_utils
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.insert(-1, pytorch_test_dir)

@@ -11,6 +11,7 @@ import torch.nn as nn
from torch.autograd import Variable
from torch.onnx import OperatorExportTypes
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)

@@ -50,9 +50,7 @@ from typing import (
)
import error_reproduction
import onnx_test_common
import parameterized
import pytest
import pytorch_test_common

@@ -4,7 +4,6 @@ import pytorch_test_common
import torch
import torch._dynamo
import torch.fx
from torch.onnx._internal.fx.passes import _utils as pass_utils
from torch.testing._internal import common_utils

@@ -2,13 +2,12 @@
from __future__ import annotations
import logging
import tempfile
from typing import Mapping, Tuple
import onnx
import onnx.inliner
import pytorch_test_common
import transformers # type: ignore[import]

@@ -3,6 +3,7 @@ from __future__ import annotations
import onnx
import onnx.inliner
import pytorch_test_common
import torch

@@ -7,20 +7,17 @@ import operator
import os
import tempfile
import unittest
from typing import Any, Callable, Dict, List, Mapping, Optional, Tuple, Type
import onnx_test_common
import onnxruntime # type: ignore[import]
import parameterized # type: ignore[import]
import pytorch_test_common
import transformers # type: ignore[import]
import torch
import torch.onnx
from torch import nn
from torch._subclasses import fake_tensor
from torch.onnx._internal import exporter
from torch.onnx._internal.fx import (
@@ -31,6 +28,7 @@ from torch.onnx._internal.fx import (
)
from torch.testing._internal import common_utils
try:
import torchvision # type: ignore[import]

@@ -31,6 +31,7 @@ from torch.onnx import OperatorExportTypes
from torch.testing._internal import common_utils
from torch.testing._internal.common_utils import skipIfNoLapack
if torch.cuda.is_available():
def toC(x):

@@ -4,6 +4,7 @@ import io
import itertools
import onnx
import pytorch_test_common
import torch

@@ -5,6 +5,7 @@ import io
from typing import List
import onnx
import onnxscript
from onnxscript.onnx_types import FLOAT

@@ -40,6 +40,7 @@ from torch.testing._internal import (
common_utils,
)
OPS_DB = copy.deepcopy(common_methods_invocations.op_db)
# Modify this section ##########################################################

@@ -41,6 +41,7 @@ from torch.onnx.symbolic_helper import (
from torch.testing._internal import common_utils
from torch.testing._internal.common_utils import skipIfNoLapack
unittest.TestCase.maxDiff = None
_onnx_test = False # flag to produce onnx test cases.

@@ -13,6 +13,7 @@ import warnings
from typing import Callable, Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
import onnx
import onnx.numpy_helper
import pytorch_test_common

@@ -3,7 +3,6 @@
from __future__ import annotations
import functools
import io
import itertools
import os
@@ -12,6 +11,7 @@ from collections import OrderedDict
from typing import Dict, List, Optional, Tuple, Type, Union
import numpy as np
import onnx
import onnx_test_common
import parameterized
@@ -38,7 +38,6 @@ from pytorch_test_common import (
)
import torch
from torch import Tensor
from torch.nn.utils import rnn as rnn_utils
from torch.onnx import errors, verification

@@ -3,7 +3,6 @@
import unittest
import onnx_test_common
import onnxruntime # noqa: F401
import parameterized
from onnx_test_common import MAX_ONNX_OPSET_VERSION, MIN_ONNX_OPSET_VERSION

@@ -3,6 +3,7 @@
import io
import numpy as np
import onnx
import pytorch_test_common
from pytorch_test_common import skipIfUnsupportedMinOpsetVersion

@@ -8,6 +8,7 @@ import warnings
from typing import Callable
import onnx
import parameterized
import pytorch_test_common
import torchvision

@@ -6,6 +6,7 @@ import tempfile
import unittest
import numpy as np
import onnx
import parameterized
import pytorch_test_common

@@ -6,10 +6,10 @@ import sys
import torch
import torch.onnx
from torch.testing._internal import common_utils
from torch.utils import _pytree as torch_pytree
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import onnx_test_common

@@ -2,6 +2,7 @@ import difflib
import io
import numpy as np
import onnx
import onnx.helper

@@ -39,6 +39,7 @@ from torch.testing._internal.common_utils import (
TestCase,
)
# load_tests from common_utils is used to automatically filter tests for
# sharding on sandcastle. This line silences flake warnings
load_tests = load_tests

@@ -17,6 +17,7 @@ from torch.testing._internal.common_utils import (
TestCase,
)
# load_tests from common_utils is used to automatically filter tests for
# sharding on sandcastle. This line silences flake warnings
load_tests = load_tests

@@ -5,6 +5,7 @@ from torch.fx import symbolic_trace
from torch.package import PackageExporter
from torch.testing._internal.common_utils import IS_FBCODE, IS_SANDCASTLE
packaging_directory = f"{Path(__file__).parent}/package_bc"
torch.package.package_exporter._gate_torchscript_serialization = False

@@ -3,6 +3,7 @@
import torch
from torch.fx import wrap
wrap("a_non_torch_leaf")

@@ -2,6 +2,7 @@
import torch
try:
from torchvision.models import resnet18

@@ -4,6 +4,7 @@ import torch
from torch.package import analyze
from torch.testing._internal.common_utils import run_tests
try:
from .common import PackageTestCase
except ImportError:

@@ -7,11 +7,11 @@ from textwrap import dedent
from unittest import skipIf
import torch.nn
from torch.package import EmptyMatchError, Importer, PackageExporter, PackageImporter
from torch.package.package_exporter import PackagingError
from torch.testing._internal.common_utils import IS_WINDOWS, run_tests
try:
from .common import PackageTestCase
except ImportError:

@@ -5,6 +5,7 @@ from io import BytesIO
from torch.package import PackageExporter
from torch.testing._internal.common_utils import run_tests
try:
from .common import PackageTestCase
except ImportError:

@@ -3,6 +3,7 @@
from torch.package._digraph import DiGraph
from torch.testing._internal.common_utils import run_tests
try:
from .common import PackageTestCase
except ImportError:

@@ -16,6 +16,7 @@ from torch.testing._internal.common_utils import (
run_tests,
)
try:
from torchvision.models import resnet18
@@ -33,6 +34,7 @@ except ImportError:
from pathlib import Path
packaging_directory = Path(__file__).parent

@@ -5,6 +5,7 @@ from typing import Iterable
from torch.package import GlobGroup
from torch.testing._internal.common_utils import run_tests
try:
from .common import PackageTestCase
except ImportError:

@@ -12,6 +12,7 @@ from torch.package import (
)
from torch.testing._internal.common_utils import run_tests
try:
from .common import PackageTestCase
except ImportError:

@@ -6,6 +6,7 @@ from unittest import skipIf
from torch.package import PackageImporter
from torch.testing._internal.common_utils import IS_FBCODE, IS_SANDCASTLE, run_tests
try:
from .common import PackageTestCase
except ImportError:

@@ -11,6 +11,7 @@ from torch.package._mangling import (
)
from torch.testing._internal.common_utils import run_tests
try:
from .common import PackageTestCase
except ImportError:

@@ -18,6 +18,7 @@ from torch.testing._internal.common_utils import (
skipIfTorchDynamo,
)
try:
from .common import PackageTestCase
except ImportError:

@@ -8,6 +8,7 @@ import torch
from torch.package import PackageExporter, PackageImporter, sys_importer
from torch.testing._internal.common_utils import IS_FBCODE, IS_SANDCASTLE, run_tests
try:
from torchvision.models import resnet18

@@ -12,6 +12,7 @@ from torch.package import (
)
from torch.testing._internal.common_utils import run_tests
try:
from .common import PackageTestCase
except ImportError:

@@ -8,6 +8,7 @@ import torch
from torch.package import PackageExporter, PackageImporter
from torch.testing._internal.common_utils import IS_FBCODE, IS_SANDCASTLE, run_tests
try:
from .common import PackageTestCase
except ImportError:

@@ -5,6 +5,7 @@ from io import BytesIO
from torch.package import PackageExporter, PackageImporter, sys_importer
from torch.testing._internal.common_utils import run_tests
try:
from .common import PackageTestCase
except ImportError:

@@ -8,6 +8,7 @@ from unittest import skipIf
from torch.package import PackageExporter, PackageImporter
from torch.testing._internal.common_utils import run_tests
try:
from .common import PackageTestCase
except ImportError:

@@ -7,6 +7,7 @@ from textwrap import dedent
from torch.package import PackageExporter, PackageImporter, sys_importer
from torch.testing._internal.common_utils import run_tests
try:
from .common import PackageTestCase
except ImportError:
@@ -15,6 +16,7 @@ except ImportError:
from pathlib import Path
packaging_directory = Path(__file__).parent

@@ -33,7 +33,6 @@ from torch.profiler import (
record_function,
supported_activities,
)
from torch.testing._internal.common_cuda import TEST_CUDA
from torch.testing._internal.common_utils import (
IS_WINDOWS,
@@ -41,9 +40,9 @@ from torch.testing._internal.common_utils import (
skipIfTorchDynamo,
TestCase,
)
from torch.utils._triton import has_triton
Json = Dict[str, Any]

@@ -21,6 +21,7 @@ from torch.testing._internal.common_utils import (
)
from torch.utils._pytree import tree_map
# These functions can vary from based on platform and build (e.g. with CUDA)
# and generally distract from rather than adding to the test.
PRUNE_ALL = 1

@@ -27,6 +27,7 @@ from torch.autograd.profiler import profile as _profile
from torch.profiler import kineto_available, record_function
from torch.testing._internal.common_utils import run_tests, TestCase
Json = Dict[str, Any]

@@ -28,6 +28,7 @@ from torch._C._profiler import _TensorMetadata
from torch.profiler import _utils, profile
from torch.testing._internal.common_utils import run_tests, TestCase
Json = Dict[str, Any]
from torch._C._profiler import _ExtraFields_PyCall

@@ -45,7 +45,6 @@ ISORT_SKIPLIST = re.compile(
# test/j*/**
"test/j*/**",
# test/[k-p]*/**
"test/[k-p]*/**",
# test/[q-z]*/**
"test/[q-z]*/**",
# torch/**