Un-ignore F403 in .flake8 (#55838)
Summary: Generally, wildcard imports are bad for the reasons described at https://www.flake8rules.com/rules/F403.html. This PR replaces wildcard imports with an explicit list of imported items where possible, and adds a `# noqa: F403` comment in the other cases (mostly re-exports in `__init__.py` files). This is a prerequisite for https://github.com/pytorch/pytorch/issues/55816, because currently [`tools/codegen/dest/register_dispatch_key.py` simply fails if you sort its imports](https://github.com/pytorch/pytorch/actions/runs/742505908).

Pull Request resolved: https://github.com/pytorch/pytorch/pull/55838

Test Plan: CI. You can also run `flake8` locally.

Reviewed By: jbschlosser

Differential Revision: D27724232

Pulled By: samestep

fbshipit-source-id: 269fb09cb4168f8a51fd65bfaacc6cda7fb87c34
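To illustrate the hazard F403 guards against, here is a minimal hypothetical sketch (`pkg_a`, `pkg_b`, and `load` are made-up names, not modules from this PR): with two wildcard imports, neither the reader nor the linter can resolve where a name comes from, and later definitions silently shadow earlier ones.

```python
# mylib/__init__.py -- hypothetical package demonstrating F403/F405
from .pkg_a import *   # flake8: F403 -- defines load(), among other things
from .pkg_b import *   # flake8: F403 -- also happens to define load()

result = load("data.bin")   # flake8: F405 -- load() may be undefined or may
                            # come from either star import; whichever module
                            # was imported last wins, silently.
```

With F403 no longer ignored repo-wide, every such line must either enumerate its imports or carry an explicit per-line waiver, which is exactly what the hunks below do.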
Committed by: Facebook GitHub Bot
Parent: 75eb026e07
Commit: 4753100a3b
Changed files include: .flake8 (2 lines changed)
@@ -4,7 +4,7 @@ max-line-length = 120
 # C408 ignored because we like the dict keyword argument syntax
 # E501 is not flexible enough, we're using B950 instead
 ignore =
-    E203,E305,E402,E501,E721,E741,F403,F405,F821,F841,F999,W503,W504,C408,E302,W291,E303,
+    E203,E305,E402,E501,E721,E741,F405,F821,F841,F999,W503,W504,C408,E302,W291,E303,
     # shebang has extra meaning in fbcode lints, so I think it's not worth trying
     # to line this up with executable bit
     EXE001,
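The remaining hunks apply one of the two fixes named in the summary. A condensed sketch of both patterns (the `factory`/`cells` names are taken from the fastrnns benchmark hunks below; which fix a given file receives is the PR's judgment call, not something flake8 dictates):

```python
# Fix 1: replace the wildcard with the names the module actually uses,
# so flake8 (and readers) can resolve every reference.
from .factory import (lstm_creator, pytorch_lstm_creator,
                      varlen_lstm_creator)

# Fix 2: keep an intentional re-export, but waive F403 for this line only.
from .cells import *  # noqa: F403
```

Fix 1 is preferred wherever practical; Fix 2 is reserved mostly for `__init__.py`-style re-exports, where enumerating every name would defeat the purpose of the re-export.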
@@ -1,5 +1,5 @@
-from .cells import *
-from .factory import *
+from .cells import *  # noqa: F403
+from .factory import *  # noqa: F403

 # (output, next_state) = cell(input, state)
 seqLength = 100
@@ -3,7 +3,12 @@ from functools import partial
 import torch
 import torchvision.models as cnn

-from .factory import *
+from .factory import (dropoutlstm_creator, imagenet_cnn_creator,
+                      layernorm_pytorch_lstm_creator, lnlstm_creator,
+                      lstm_creator, lstm_multilayer_creator,
+                      lstm_premul_bias_creator, lstm_premul_creator,
+                      lstm_simple_creator, pytorch_lstm_creator,
+                      varlen_lstm_creator, varlen_pytorch_lstm_creator)


 class DisableCuDNN():
@@ -1,9 +1,24 @@
 import os
 import sys
 import unittest
 from typing import Tuple

 import torch
-from jit.test_hooks_modules import *
+from jit.test_hooks_modules import (
+    ModuleDirectFowardSubmodCall, ModuleForwardSingleInput,
+    ModuleForwardTupleInput, create_forward_tuple_input,
+    create_module_forward_multiple_inputs, create_module_forward_single_input,
+    create_module_hook_return_nothing,
+    create_module_multiple_hooks_multiple_inputs,
+    create_module_multiple_hooks_single_input, create_module_no_forward_input,
+    create_module_same_hook_repeated, create_submodule_forward_multiple_inputs,
+    create_submodule_forward_single_input,
+    create_submodule_forward_single_input_return_not_tupled,
+    create_submodule_hook_return_nothing,
+    create_submodule_multiple_hooks_multiple_inputs,
+    create_submodule_multiple_hooks_single_input,
+    create_submodule_no_forward_input, create_submodule_same_hook_repeated,
+    create_submodule_to_call_directly_with_hooks)

 # Make the helper files in test/ importable
 pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
@@ -6,7 +6,18 @@ import torch
 # grab modules from test_jit_hooks.cpp
 pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
 sys.path.append(pytorch_test_dir)
-from jit.test_hooks_modules import *
+from jit.test_hooks_modules import (
+    create_forward_tuple_input, create_module_forward_multiple_inputs,
+    create_module_forward_single_input, create_module_hook_return_nothing,
+    create_module_multiple_hooks_multiple_inputs,
+    create_module_multiple_hooks_single_input, create_module_no_forward_input,
+    create_module_same_hook_repeated, create_submodule_forward_multiple_inputs,
+    create_submodule_forward_single_input,
+    create_submodule_hook_return_nothing,
+    create_submodule_multiple_hooks_multiple_inputs,
+    create_submodule_multiple_hooks_single_input,
+    create_submodule_same_hook_repeated,
+    create_submodule_to_call_directly_with_hooks)

 # Create saved modules for JIT forward hooks and pre-hooks
 def main():
@@ -1,8 +1,8 @@
 import torch
 import torch.utils.bundled_inputs
-from torch.utils.mobile_optimizer import *
+from torch.utils.mobile_optimizer import optimize_for_mobile
 import io
-from typing import NamedTuple
+from typing import Dict, List, NamedTuple
 from collections import namedtuple

 from torch.jit.mobile import _load_for_lite_interpreter, _export_operator_list
@@ -1,4 +1,4 @@
-from .squeezenet import *
-from .super_resolution import *
-from .op_test import *
-from .srresnet import *
+from .squeezenet import *  # noqa: F403
+from .super_resolution import *  # noqa: F403
+from .op_test import *  # noqa: F403
+from .srresnet import *  # noqa: F403
@@ -8,7 +8,7 @@ import torch.autograd.function as function
 pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
 sys.path.insert(-1, pytorch_test_dir)

-from torch.testing._internal.common_utils import *  # noqa: F401
+from torch.testing._internal.common_utils import *  # noqa: F401,F403

 torch.set_default_tensor_type('torch.FloatTensor')

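Note that the hunk above widens an existing per-line waiver rather than adding a new one: flake8 accepts a comma-separated list of codes in a single `noqa` comment. A generic sketch (hypothetical module name):

```python
from mymodule import *  # noqa: F401,F403
# Both F401 ("imported but unused") and F403 ("star import used") are
# waived for this line only; any other warning here is still reported.
```

The same widening pattern appears later in this diff for the `.api` and `.fusion_patterns` star imports.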
@@ -1,6 +1,6 @@
 import sys
 sys.argv.append("--jit_executor=legacy")
-from test_jit_fuser import *
+from test_jit_fuser import *  # noqa: F403

 if __name__ == '__main__':
     run_tests()
@@ -1,6 +1,6 @@
 import sys
 sys.argv.append("--jit_executor=legacy")
-from test_jit import *
+from test_jit import *  # noqa: F403

 if __name__ == '__main__':
     run_tests()
@@ -1,6 +1,6 @@
 import sys
 sys.argv.append("--jit_executor=profiling")
-from test_jit import *
+from test_jit import *  # noqa: F403

 if __name__ == '__main__':
     run_tests()
@@ -1,6 +1,6 @@
 import sys
 sys.argv.append("--jit_executor=simple")
-from test_jit import *
+from test_jit import *  # noqa: F403

 if __name__ == '__main__':
     run_tests()
@@ -5,7 +5,9 @@ import torch.backends.xnnpack
 import torch.utils.bundled_inputs
 from torch.testing._internal.common_utils import TestCase, run_tests
 from torch.testing._internal.jit_utils import get_forward, get_forward_graph
-from torch.utils.mobile_optimizer import *
+from torch.utils.mobile_optimizer import (LintCode,
+                                          generate_mobile_module_lints,
+                                          optimize_for_mobile)
 from torch.nn import functional as F
 from torch._C import MobileOptimizerType
 from torch.testing._internal.common_quantized import override_quantized_engine
@@ -22,7 +22,7 @@ from typing import Dict, List, Any

 from tools.codegen.gen import parse_native_yaml, FileManager
 from tools.codegen.context import with_native_function
-from tools.codegen.model import *
+from tools.codegen.model import BaseOperatorName, NativeFunction
 import tools.codegen.api.python as python
 from .gen_python_functions import should_generate_py_binding, is_py_torch_function, \
     is_py_nn_function, is_py_linalg_function, is_py_variable_method
@@ -8,12 +8,13 @@ from .gen_inplace_or_view_type import VIEW_FUNCTIONS

 from typing import List, Sequence, Tuple

-from tools.codegen.api.autograd import *
-from tools.codegen.api.types import *
+from tools.codegen.api.autograd import (Derivative, DifferentiabilityInfo,
+                                        SavedAttribute, uses_retain_variables,
+                                        uses_single_grad)
+from tools.codegen.api.types import Binding
 from tools.codegen.code_template import CodeTemplate
 from tools.codegen.gen import FileManager
-from tools.codegen.model import *
-from tools.codegen.utils import *
+from tools.codegen.model import Argument

 FUNCTION_DECLARATION = CodeTemplate("""\
 struct TORCH_API ${op} : public ${superclass} {
@@ -39,12 +39,24 @@ from .gen_trace_type import should_trace

 from tools.codegen.code_template import CodeTemplate
 from tools.codegen.api import cpp
-from tools.codegen.api.types import *
-from tools.codegen.api.python import *
+from tools.codegen.api.types import CppSignatureGroup
+from tools.codegen.api.python import (PythonArgument, PythonSignature,
+                                      PythonSignatureDeprecated,
+                                      PythonSignatureGroup,
+                                      PythonSignatureNativeFunctionPair,
+                                      arg_parser_output_exprs,
+                                      argument_type_str, cpp_dispatch_exprs,
+                                      cpp_dispatch_target,
+                                      dispatch_lambda_args,
+                                      dispatch_lambda_exprs,
+                                      dispatch_lambda_return_str,
+                                      has_tensor_options,
+                                      namedtuple_fieldnames, signature)
 from tools.codegen.gen import cpp_string, parse_native_yaml, FileManager
 from tools.codegen.context import with_native_function
-from tools.codegen.model import *
-from tools.codegen.utils import *
+from tools.codegen.model import (Argument, BaseOperatorName, NativeFunction,
+                                 Type, Variant)
+from tools.codegen.utils import split_name_params

 from typing import Dict, Optional, List, Tuple, Set, Sequence, Callable

@@ -1,13 +1,14 @@
 import itertools
 from typing import Optional, List, Sequence, Union

-from tools.codegen.api.types import *
+from tools.codegen.api.types import CppSignatureGroup, DispatcherSignature
 from tools.codegen.api import cpp
 from tools.codegen.code_template import CodeTemplate
 from tools.codegen.context import with_native_function
 from tools.codegen.utils import mapMaybe
 from tools.codegen.gen import parse_native_yaml, FileManager
-from tools.codegen.model import *
+from tools.codegen.model import (Argument, NativeFunction, SchemaKind,
+                                 TensorOptionsArguments)

 # Note [Manual Backend kernels]
 # For these ops, we want to manually register to dispatch key Backend and
@@ -5,13 +5,13 @@
 import re
 from typing import Optional, List

-from tools.codegen.api.types import *
+from tools.codegen.api.types import CppSignatureGroup
 from tools.codegen.api import cpp
 import tools.codegen.api.python as python
 from tools.codegen.gen import parse_native_yaml, FileManager
 from tools.codegen.context import with_native_function
 from tools.codegen.utils import mapMaybe
-from tools.codegen.model import *
+from tools.codegen.model import NativeFunction, TensorOptionsArguments, Variant

 OPTIONAL_TYPE_PATTERN = re.compile(r"c10::optional<(.+)>")
 TYPE_PATTERN = re.compile(r"(?:const\s+)?([A-Z]\w+)")
@@ -32,14 +32,18 @@ from .gen_inplace_or_view_type import (
     ASSIGN_RETURN_VALUE, gen_formals,
 )

-from tools.codegen.api.types import *
-from tools.codegen.api.autograd import *
+from tools.codegen.api.types import Binding, DispatcherSignature
+from tools.codegen.api.autograd import (
+    DifferentiableInput, NativeFunctionWithDifferentiabilityInfo,
+    SavedAttribute, dispatch_strategy, gen_differentiable_outputs,
+    is_differentiable)
 from tools.codegen.api import cpp
 from tools.codegen.code_template import CodeTemplate
 from tools.codegen.context import with_native_function
 from tools.codegen.gen import FileManager
 from tools.codegen.utils import mapMaybe
-from tools.codegen.model import *
+from tools.codegen.model import (Argument, NativeFunction, SchemaKind,
+                                 SelfArgument, TensorOptionsArguments)
 from typing import Callable, List, Optional, Sequence, Union

 # We don't set or modify grad_fn on these methods. Generally, they return
@@ -7,13 +7,14 @@ import re
 from typing import Sequence, Any, Tuple, List, Set, Dict, Match, Optional
 import yaml

-from tools.codegen.api.autograd import *
-from tools.codegen.api.types import *
+from tools.codegen.api.autograd import (Derivative, DifferentiabilityInfo,
+                                        SavedAttribute)
+from tools.codegen.api.types import Binding, CppSignatureGroup
 from tools.codegen.api import cpp
 from tools.codegen.gen import parse_native_yaml
 from tools.codegen.context import with_native_function
-from tools.codegen.model import *
-from tools.codegen.utils import *
+from tools.codegen.model import FunctionSchema, NativeFunction
+from tools.codegen.utils import IDENT_REGEX, split_name_params

 try:
     # use faster C loader if available
@@ -3,8 +3,8 @@ import re
 from typing import Optional, Sequence, List, Tuple

 from tools.codegen.api import cpp
-from tools.codegen.api.types import *
-from tools.codegen.model import *
+from tools.codegen.api.types import Binding
+from tools.codegen.model import NativeFunction, Type, SchemaKind
 from tools.codegen.utils import IDENT_REGEX

 # Represents a saved attribute involved in backward calculation.
@@ -1,5 +1,10 @@
-from tools.codegen.model import *
-from tools.codegen.api.types import *
+from tools.codegen.model import (Argument, Arguments, BaseTy, BaseType,
+                                 FunctionSchema, ListType, NativeFunction,
+                                 OptionalType, Return, SelfArgument,
+                                 TensorOptionsArguments, Type, assert_never)
+from tools.codegen.api.types import (ArgName, BaseCType, Binding,
+                                     ConstRefCType, CType, MutRefCType,
+                                     OptionalCType, SpecialArgName)
 from typing import Optional, Sequence, Union, List, Set

 # This file describes the translation of JIT schema to the public C++
@@ -1,6 +1,8 @@
-from tools.codegen.model import *
+from tools.codegen.model import (Argument, FunctionSchema, Return,
+                                 SelfArgument, TensorOptionsArguments, Type,
+                                 assert_never)

-from tools.codegen.api.types import *
+from tools.codegen.api.types import ArgName, Binding, CType
 from tools.codegen.api import cpp

 import itertools
@@ -1,5 +1,4 @@
-from tools.codegen.model import *
-from tools.codegen.api.types import *
+from tools.codegen.model import NativeFunctionsGroup

 # Follows dispatcher calling convention, but:
 # - Mutable arguments not allowed. Meta functions are always
@@ -1,6 +1,10 @@
-from tools.codegen.model import *
+from tools.codegen.model import (Argument, FunctionSchema, Return,
+                                 SelfArgument, TensorOptionsArguments, Type,
+                                 assert_never)

-from tools.codegen.api.types import *
+from tools.codegen.api.types import (ArgName, BaseCType, Binding,
+                                     ConstRefCType, CType, MutRefCType,
+                                     OptionalCType)
 from tools.codegen.api import cpp

 from typing import Union, Sequence, List, Optional
@@ -1,10 +1,12 @@
 from dataclasses import dataclass
 from typing import Optional, Union, Sequence, Set, List, Dict, Tuple

-from tools.codegen.api.types import *
+from tools.codegen.api.types import Binding, CppSignature, CppSignatureGroup
 from tools.codegen.api import cpp
 from tools.codegen.gen import pythonify_default
-from tools.codegen.model import *
+from tools.codegen.model import (Argument, BaseTy, BaseType, ListType,
+                                 NativeFunction, OptionalType, Return, Type,
+                                 Variant)

 # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
 #
@@ -1,6 +1,10 @@
-from tools.codegen.model import *
+from tools.codegen.model import (Argument, BaseTy, BaseType, ListType,
+                                 NativeFunctionsGroup, OptionalType,
+                                 SelfArgument, TensorOptionsArguments, Type,
+                                 assert_never)

-from tools.codegen.api.types import *
+from tools.codegen.api.types import (ArgName, BaseCType, Binding,
+                                     ConstRefCType, CType, OptionalCType)
 from tools.codegen.api import cpp

 from typing import Union, List
@@ -1,5 +1,7 @@
 from typing import Dict, Sequence, List, NoReturn, Union
-from tools.codegen.api.types import *
+from tools.codegen.api.types import (BaseCType, Binding, ConstRefCType, CType,
+                                     Expr, MutRefCType, OptionalCType,
+                                     SpecialArgName)

 # This file implements a small program synthesis engine that implements
 # conversions between one API to another.
@@ -1,4 +1,5 @@
-from tools.codegen.model import *
+from tools.codegen.model import (Argument, FunctionSchema, NativeFunction,
+                                 SelfArgument, TensorOptionsArguments)
 from dataclasses import dataclass
 from typing import Optional, Union, Sequence, TypeVar, List, Set
 from enum import Enum
@@ -1,5 +1,5 @@
-from tools.codegen.utils import *
-from tools.codegen.model import *
+from tools.codegen.utils import S, T, context
+from tools.codegen.model import NativeFunction, NativeFunctionsGroup
 import tools.codegen.local as local

 import functools
@@ -1,9 +1,9 @@
 from typing import List, Union, Set, Any

-from tools.codegen.context import *
-from tools.codegen.utils import *
-from tools.codegen.model import *
-from tools.codegen.api.types import *
+from tools.codegen.context import with_native_function
+from tools.codegen.utils import concatMap
+from tools.codegen.model import (NativeFunction, NativeFunctionsGroup,
+                                 is_structured_dispatch_key)
 import tools.codegen.api.meta as meta
 import tools.codegen.api.native as native
 import tools.codegen.api.structured as structured
@@ -3,10 +3,17 @@ import itertools
 from typing_extensions import Literal
 from dataclasses import dataclass

-from tools.codegen.context import *
-from tools.codegen.utils import *
-from tools.codegen.model import *
-from tools.codegen.api.types import *
+from tools.codegen.context import method_with_native_function
+from tools.codegen.utils import Target, mapMaybe
+from tools.codegen.model import (DispatchKey, NativeFunction,
+                                 NativeFunctionsGroup, SchemaKind,
+                                 TensorOptionsArguments, assert_never,
+                                 is_cuda_dispatch_key,
+                                 is_structured_dispatch_key)
+from tools.codegen.api.types import (BaseCType, Binding, ConstRefCType,
+                                     CppSignature, CppSignatureGroup,
+                                     DispatcherSignature, Expr, MutRefCType,
+                                     NativeSignature)
 import tools.codegen.api.meta as meta
 import tools.codegen.api.structured as structured
 from tools.codegen.api.translate import translate
@@ -10,8 +10,15 @@ import json
 from dataclasses import dataclass

 from tools.codegen.code_template import CodeTemplate
-from tools.codegen.model import *
-from tools.codegen.api.types import *
+from tools.codegen.model import (Argument, DispatchKey, FunctionSchema,
+                                 Location, NativeFunction,
+                                 NativeFunctionsGroup, OperatorName,
+                                 OptionalType, SchemaKind, SelfArgument,
+                                 TensorOptionsArguments, Type, Variant,
+                                 assert_never, is_cuda_dispatch_key,
+                                 is_generic_dispatch_key)
+from tools.codegen.api.types import (Binding, CppSignature, CppSignatureGroup,
+                                     DispatcherSignature, NativeSignature)
 from tools.codegen.api import cpp
 import tools.codegen.api.dispatcher as dispatcher
 import tools.codegen.api.native as native
@@ -19,8 +26,10 @@ import tools.codegen.api.meta as meta
 import tools.codegen.api.structured as structured
 from tools.codegen.api.translate import translate
 from tools.codegen.selective_build.selector import SelectiveBuilder
-from tools.codegen.utils import *
-from tools.codegen.context import *
+from tools.codegen.utils import Target, concatMap, context, mapMaybe
+from tools.codegen.context import (method_with_native_function,
+                                   native_function_manager,
+                                   with_native_function)
 import tools.codegen.dest as dest

 try:
@@ -5,8 +5,6 @@ from typing import List, Dict, Optional, Iterator, Tuple, Set, NoReturn, Sequenc
 from enum import Enum, auto
 import itertools

-from tools.codegen.utils import *
-
 # A little trick from https://github.com/python/mypy/issues/6366
 # for getting mypy to do exhaustiveness checking
 # TODO: put this somewhere else, maybe
@@ -4,7 +4,9 @@ import yaml
 from dataclasses import dataclass

 from tools.codegen.model import NativeFunction
-from tools.codegen.selective_build.operator import *
+from tools.codegen.selective_build.operator import (
+    SelectiveBuildOperator, merge_debug_info, merge_operator_dicts,
+    strip_operator_overload_name)

 # A SelectiveBuilder holds information extracted from the selective build
 # YAML specification.
@@ -2,9 +2,11 @@
 import argparse
 import os
 from typing import Set
-from tools.codegen.selective_build.selector import *
+from tools.codegen.selective_build.selector import SelectiveBuilder
 from tools.codegen.code_template import CodeTemplate

 import yaml

 if_condition_template_str = """if (kernel_tag_sv.compare("$kernel_tag_name") == 0) {
   return $dtype_checks;
 }"""
@@ -3,8 +3,9 @@ from pprint import pformat

 import argparse

-from tools.codegen.model import *
-from tools.codegen.api.python import *
+from tools.codegen.model import Variant
+from tools.codegen.api.python import (PythonSignatureGroup,
+                                      PythonSignatureNativeFunctionPair)
 from tools.codegen.gen import FileManager
 from typing import Sequence, List, Dict

@@ -177,7 +177,7 @@ if (USE_RTLD_GLOBAL_WITH_LIBTORCH or os.getenv('TORCH_USE_RTLD_GLOBAL')) and \
     import torch._dl as _dl_flags  # type: ignore
     old_flags = sys.getdlopenflags()
     sys.setdlopenflags(_dl_flags.RTLD_GLOBAL | _dl_flags.RTLD_LAZY)
-    from torch._C import *
+    from torch._C import *  # noqa: F403
     sys.setdlopenflags(old_flags)
     del old_flags
     del _dl_flags
@@ -194,7 +194,7 @@ else:
     # See Note [Global dependencies]
     if USE_GLOBAL_DEPS:
         _load_global_deps()
-    from torch._C import *
+    from torch._C import *  # noqa: F403

 # Appease the type checker; ordinarily this binding is inserted by the
 # torch._C module initialization code in C
@@ -587,7 +587,7 @@ if TYPE_CHECKING:
     # Some type signatures pulled in from _VariableFunctions here clash with
     # signatures already imported. For now these clashes are ignored; see
     # PR #43339 for details.
-    from torch._C._VariableFunctions import *  # type: ignore
+    from torch._C._VariableFunctions import *  # type: ignore # noqa: F403

 for name in dir(_C._VariableFunctions):
     if name.startswith('__'):
@@ -600,7 +600,7 @@ for name in dir(_C._VariableFunctions):
 ################################################################################

 # needs to be after the above ATen bindings so we can overwrite from Python side
-from .functional import *
+from .functional import *  # noqa: F403


 ################################################################################
@@ -1 +1 @@
-from .tensor import *
+from .tensor import *  # noqa: F403
@@ -491,10 +491,10 @@ def current_blas_handle():
     return torch._C._cuda_getCurrentBlasHandle()


-from .memory import *
+from .memory import *  # noqa: F403


-from .random import *
+from .random import *  # noqa: F403

 ################################################################################
 # Define Storage and Tensor classes
@@ -47,7 +47,7 @@ if is_available():
         _round_robin_process_groups,
     )

-    from .distributed_c10d import *
+    from .distributed_c10d import *  # noqa: F403
     # Variables prefixed with underscore are not auto imported
     # See the comment in `distributed_c10d.py` above `_backend` on why we expose
     # this.
@@ -101,7 +101,7 @@ process:
   to participate in next rendezvous.
 """

-from .api import *
+from .api import *  # noqa: F403
 from .registry import _register_default_handlers


@@ -1,2 +1,2 @@
 from .api.remote_module import RemoteModule
-from .functional import *
+from .functional import *  # noqa: F403
@@ -62,7 +62,7 @@ if is_available():
         _DEFAULT_RPC_TIMEOUT_SEC,
     )  # noqa: F401
     from torch._C._distributed_c10d import Store
-    from .api import *  # noqa: F401
+    from .api import *  # noqa: F401,F403
     from .options import TensorPipeRpcBackendOptions  # noqa: F401
     from .backend_registry import BackendType
     from .server_process_global_profiler import (
@@ -109,7 +109,7 @@ from .relaxed_bernoulli import RelaxedBernoulli
 from .relaxed_categorical import RelaxedOneHotCategorical
 from .studentT import StudentT
 from .transformed_distribution import TransformedDistribution
-from .transforms import *
+from .transforms import *  # noqa: F403
 from .uniform import Uniform
 from .von_mises import VonMises
 from .weibull import Weibull
@@ -1 +1 @@
-from .onnx import *
+from .onnx import *  # noqa: F403
@@ -22,7 +22,7 @@ __all__ = ['set_sharing_strategy', 'get_sharing_strategy',
            'get_all_sharing_strategies']


-from multiprocessing import *
+from multiprocessing import *  # noqa: F403


 __all__ += multiprocessing.__all__  # type: ignore[attr-defined]
@@ -1,4 +1,4 @@
-from .modules import *
+from .modules import *  # noqa: F403
 from .parameter import Parameter, UninitializedParameter, UninitializedBuffer
 from .parallel import DataParallel
 from . import init
@@ -1 +1 @@
-from .modules import *
+from .modules import *  # noqa: F403
@@ -1 +1 @@
-from .modules import *
+from .modules import *  # noqa: F403
@@ -1 +1 @@
-from .modules import *
+from .modules import *  # noqa: F403
@@ -1 +1 @@
-from .modules import *
+from .modules import *  # noqa: F403
@@ -1 +1 @@
-from .modules import *
+from .modules import *  # noqa: F403
@@ -1 +1 @@
-from .modules import *
+from .modules import *  # noqa: F403
@@ -1 +1 @@
-from .modules import *
+from .modules import *  # noqa: F403
@@ -1 +1 @@
-from .modules import *
+from .modules import *  # noqa: F403
@@ -1 +1 @@
-from .modules import *
+from .modules import *  # noqa: F403
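The nine identical hunks above are package re-exports of the `from .modules import *` form (per the summary, mostly `__init__.py` files in different packages). A small sketch of why such a wildcard is still bounded at runtime (hypothetical module, not code from this PR):

```python
# modules.py -- hypothetical submodule
__all__ = ['Linear', 'Conv2d']   # the public surface that `*` re-exports

class Linear: ...
class Conv2d: ...
class _Helper: ...               # not listed in __all__, so not re-exported

# package __init__.py
from .modules import *  # noqa: F403
# At runtime, `*` honors modules.__all__, so only Linear and Conv2d leak
# out. flake8 flags F403 regardless, because it analyzes syntax without
# evaluating __all__ -- hence the explicit per-line waiver.
```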
@@ -1,14 +1,14 @@
-from .quantize import *
-from .observer import *
-from .qconfig import *
-from .fake_quantize import *
+from .quantize import *  # noqa: F403
+from .observer import *  # noqa: F403
+from .qconfig import *  # noqa: F403
+from .fake_quantize import *  # noqa: F403
 from .fuse_modules import fuse_modules
-from .stubs import *
-from .quant_type import *
-from .quantize_jit import *
+from .stubs import *  # noqa: F403
+from .quant_type import *  # noqa: F403
+from .quantize_jit import *  # noqa: F403
 # from .quantize_fx import *
-from .quantization_mappings import *
-from .fuser_method_mappings import *
+from .quantization_mappings import *  # noqa: F403
+from .fuser_method_mappings import *  # noqa: F403

 def default_eval_fn(model, calib_data):
     r"""
@@ -17,7 +17,7 @@ from .pattern_utils import (
     get_default_fusion_patterns,
 )

-from .fusion_patterns import *  # noqa: F401
+from .fusion_patterns import *  # noqa: F401,F403

 from .quantization_types import Pattern

@@ -28,6 +28,7 @@ from ..quantize import (

 from ..utils import (
     get_combined_dict,
+    get_qconfig_dtypes,
     get_swapped_custom_module_class,
     weight_is_quantized,
     activation_is_statically_quantized,
@@ -52,7 +53,16 @@ from .graph_module import (
     QuantizedGraphModule,
 )

-from .quantization_patterns import *
+from .quantization_patterns import (
+    binary_op_supported_dtypes,
+    BinaryOpQuantizeHandler,
+    CopyNodeQuantizeHandler,
+    CustomModuleQuantizeHandler,
+    DefaultQuantizeHandler,
+    FixedQParamsOpQuantizeHandler,
+    QuantizeHandler,
+    StandaloneModuleQuantizeHandler,
+)

 from .utils import (
     _parent_name,
@@ -66,11 +76,19 @@ from .utils import (
     node_return_type_is_int,
 )

-from .qconfig_utils import *
+from .qconfig_utils import (
+    convert_dict_to_ordered_dict,
+    get_flattened_qconfig_dict,
+    get_object_type_qconfig,
+    get_qconfig,
+    QConfigAny,
+)

 import operator

 from collections import defaultdict

-from typing import Optional, Dict, Any, List, Tuple, Set, Callable
+from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Union

 # Define helper types
 MatchResult = Tuple[Node, List[Node], Optional[Pattern], QuantizeHandler,
@@ -1,6 +1,14 @@
 from collections import namedtuple
-from .observer import *
-from .fake_quantize import *
+from .observer import (HistogramObserver, MovingAverageMinMaxObserver,
+                       PlaceholderObserver, default_debug_observer,
+                       default_dynamic_quant_observer,
+                       default_float_qparams_observer, default_observer,
+                       default_per_channel_weight_observer,
+                       default_placeholder_observer, default_weight_observer)
+from .fake_quantize import (FakeQuantize, default_fake_quant,
+                            default_per_channel_weight_fake_quant,
+                            default_weight_fake_quant)
 import torch
 import torch.nn as nn

 from typing import Union, Optional
@@ -1,3 +1,3 @@
-from ._core import *
-from ._asserts import *
-from ._check_kernel_launches import *
+from ._core import *  # noqa: F403
+from ._asserts import *  # noqa: F403
+from ._check_kernel_launches import *  # noqa: F403
@@ -1,6 +1,6 @@
-from torch.utils.benchmark.utils.common import *
-from torch.utils.benchmark.utils.timer import *
-from torch.utils.benchmark.utils.compare import *
-from torch.utils.benchmark.utils.fuzzer import *
-from torch.utils.benchmark.utils.valgrind_wrapper.timer_interface import *
-from torch.utils.benchmark.utils.sparse_fuzzer import *
+from torch.utils.benchmark.utils.common import *  # noqa: F403
+from torch.utils.benchmark.utils.timer import *  # noqa: F403
+from torch.utils.benchmark.utils.compare import *  # noqa: F403
+from torch.utils.benchmark.utils.fuzzer import *  # noqa: F403
+from torch.utils.benchmark.utils.valgrind_wrapper.timer_interface import *  # noqa: F403
+from torch.utils.benchmark.utils.sparse_fuzzer import *  # noqa: F403
@@ -1,6 +1,17 @@
 import collections

-from .constants import *
+from .constants import (API_BLAS, API_C10, API_CAFFE2, API_DRIVER, API_FFT,
+                        API_PYTORCH, API_RAND, API_ROCTX, API_RTC, API_RUNTIME,
+                        API_SPARSE, CONV_CACHE, CONV_CONTEXT, CONV_D3D9,
+                        CONV_D3D10, CONV_D3D11, CONV_DEF, CONV_DEVICE,
+                        CONV_DEVICE_FUNC, CONV_EGL, CONV_ERROR, CONV_EVENT,
+                        CONV_EXEC, CONV_GL, CONV_GRAPHICS, CONV_INCLUDE,
+                        CONV_INCLUDE_CUDA_MAIN_H, CONV_INIT, CONV_JIT,
+                        CONV_MATH_FUNC, CONV_MEM, CONV_MODULE,
+                        CONV_NUMERIC_LITERAL, CONV_OCCUPANCY, CONV_OTHER,
+                        CONV_PEER, CONV_SPECIAL_FUNC, CONV_STREAM,
+                        CONV_SURFACE, CONV_TEX, CONV_THREAD, CONV_TYPE,
+                        CONV_VDPAU, CONV_VERSION, HIP_UNSUPPORTED)

 """ Mapping of CUDA functions, include files, constants, and types to ROCm/HIP equivalents
 This closely follows the implementation in hipify-clang