Drop unused imports from test (#49973)

Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/49973

From
```
./python/libcst/libcst codemod remove_unused_imports.RemoveUnusedImportsWithGlean --no-format caffe2/
```
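For reference, `RemoveUnusedImportsWithGlean` is an internal variant; a roughly equivalent invocation with the open-source LibCST command-line tool would look like the sketch below (assumes LibCST is installed and the repository has a `.libcst.codemod.yaml`; formatter behavior is taken from that config rather than a `--no-format` flag).

```
# Hypothetical open-source equivalent of the codemod run above:
python -m libcst.tool codemod remove_unused_imports.RemoveUnusedImportsCommand caffe2/
```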

Test Plan: Standard sandcastle tests

Reviewed By: xush6528

Differential Revision: D25727350

fbshipit-source-id: 237ec4edd85788de920663719173ebec7ddbae1c
Author: Richard Barnes
Date: 2021-01-07 12:07:49 -08:00
Committed by: Facebook GitHub Bot
Parent: fbdb7822c6
Commit: ec6d29d6fa

8 changed files with 18 additions and 34 deletions


@@ -11,7 +11,7 @@ from typing import Any
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase, _inline_everything
from typing import List, Tuple
from typing import List
from torch import Tensor
class TestAsync(JitTestCase):


@@ -1,13 +1,12 @@
import os
import sys
import inspect
from typing import Dict, List, Optional, Tuple, Any
from typing import Dict, List, Any
from textwrap import dedent
from collections import OrderedDict
import torch
from torch.testing import FileCheck
from torch import Tensor
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))


@@ -2,11 +2,10 @@ import os
import sys
import typing
import typing_extensions
from typing import List, Dict, Optional, Tuple
from typing import List, Dict, Optional
import torch
import torch.nn as nn
from torch import Tensor
from torch.testing import FileCheck
from collections import OrderedDict


@@ -5,7 +5,6 @@ import sys
import random
import torch
from itertools import product as product
from torch import Tensor
from torch.testing._internal.common_utils import TemporaryFileName
from typing import NamedTuple, Optional


@@ -82,6 +82,7 @@ import torch.testing._internal.hypothesis_utils as hu
hu.assert_deadline_disabled()
# Standard library
from typing import Tuple
import copy
import io
import unittest
@@ -1007,10 +1008,7 @@ class TestPostTrainingDynamic(QuantizationTestCase):
super(ScriptWrapperPackedLSTM, self).__init__()
self.cell = cell
def forward(self,
x # type: PackedSequence
):
# type: (...) -> Tuple[PackedSequence, Tuple[torch.Tensor, torch.Tensor]]
def forward(self, x: PackedSequence) -> Tuple[PackedSequence, Tuple[torch.Tensor, torch.Tensor]]:
return self.cell(x)
class ScriptWrapperPackedGRU(torch.nn.Module):
@@ -1018,10 +1016,7 @@ class TestPostTrainingDynamic(QuantizationTestCase):
super(ScriptWrapperPackedGRU, self).__init__()
self.cell = cell
def forward(self,
x # type: PackedSequence
):
# type: (...) -> Tuple[PackedSequence, torch.Tensor]
def forward(self, x: PackedSequence) -> Tuple[PackedSequence, torch.Tensor]:
return self.cell(x)
script_wrapper_map = {'LSTM': ScriptWrapperPackedLSTM,


@@ -44,6 +44,7 @@ from torch._C import TensorType, BoolType, parse_ir, _propagate_shapes
from torch._six import PY37, StringIO
from torch.autograd import Variable
from torch.jit.annotations import BroadcastingList2, BroadcastingList3, Any # noqa: F401
from torch.nn.utils.rnn import PackedSequence
from torch.testing import FileCheck
import torch.autograd.profiler
import torch.cuda
@@ -80,30 +81,30 @@ from torch.testing._internal.test_module.no_future_div import div_int_nofuture,
# Standard library
from collections import defaultdict, namedtuple, OrderedDict
import copy
from copy import deepcopy
from itertools import product
import itertools
from textwrap import dedent
from typing import List, Dict, NamedTuple, Optional, Tuple, Union
import inspect
import math
import copy
import functools
import numpy as np
import inspect
import io
import itertools
import math
import numpy as np
import os
import pickle
import pickletools
import random
import re
import shutil
import string
import sys
import tempfile
import types
import unittest
import warnings
import zipfile
import re
import string
def canonical(graph):
@@ -14369,7 +14370,6 @@ dedent """
self.assertEqual(eager_out, script_out)
def test_nn_LSTM(self):
from torch.nn.utils.rnn import PackedSequence
input = torch.nn.utils.rnn.pack_sequence([torch.randn(5, 5)])
class S(torch.jit.ScriptModule):
@@ -14378,8 +14378,7 @@ dedent """
self.x = torch.nn.LSTM(5, 5)
@torch.jit.script_method
def forward(self, input):
# type: (PackedSequence) -> Tuple[PackedSequence, Tuple[Tensor, Tensor]] # noqa
def forward(self, input: PackedSequence) -> Tuple[PackedSequence, Tuple[torch.Tensor, torch.Tensor]]:
return self.x(input)
eager_out = self.runAndSaveRNG(lambda x: torch.nn.LSTM(5, 5)(x), (input,))[0]
@@ -14388,7 +14387,6 @@ dedent """
self.assertEqual(eager_out, script_out)
def test_nn_GRU(self):
from torch.nn.utils.rnn import PackedSequence
seq_input = torch.nn.utils.rnn.pack_sequence([torch.randn(5, 5)])
tensor_input = torch.randn(5, 5, 5)
@@ -14398,8 +14396,7 @@ dedent """
self.x = torch.nn.GRU(5, 5)
@torch.jit.script_method
def forward(self, input):
# type: (PackedSequence) -> Tuple[PackedSequence, Tensor]
def forward(self, input: PackedSequence) -> Tuple[PackedSequence, torch.Tensor]:
return self.x(input)
class TensorGRU(torch.jit.ScriptModule):
@@ -14408,8 +14405,7 @@ dedent """
self.x = torch.nn.GRU(5, 5)
@torch.jit.script_method
def forward(self, input):
# type: (Tensor) -> Tuple[Tensor, Tensor]
def forward(self, input: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
return self.x(input)
seq_eager_out = self.runAndSaveRNG(lambda x: torch.nn.GRU(5, 5)(x), (seq_input,))[0]


@@ -3845,7 +3845,6 @@ class TestLinalg(TestCase):
"""Compare torch and scipy.sparse.linalg implementations of lobpcg
"""
import time
import scipy
from torch.testing._internal.common_utils import random_sparse_pd_matrix
from torch._linalg_utils import matmul as mm
from scipy.sparse.linalg import lobpcg as scipy_lobpcg


@@ -1,7 +1,7 @@
import unittest
import torch.testing._internal.common_utils as common
from torch.testing._internal.common_utils import TEST_NUMBA, TEST_NUMPY
from torch.testing._internal.common_utils import TEST_NUMPY
from torch.testing._internal.common_cuda import TEST_NUMBA_CUDA, TEST_CUDA, TEST_MULTIGPU
import torch
@@ -9,9 +9,6 @@ import torch
if TEST_NUMPY:
import numpy
if TEST_NUMBA:
import numba
if TEST_NUMBA_CUDA:
import numba.cuda