Mirror of https://github.com/pytorch/pytorch.git (synced 2025-10-20 12:54:11 +08:00)
[BE][Easy] eliminate relative import in torchgen (#128872)
Fix generated by:

```bash
ruff check --config 'lint.flake8-tidy-imports.ban-relative-imports="all"' --fix --select=TID $(fd '.pyi?$' torchgen)
```

Pull Request resolved: https://github.com/pytorch/pytorch/pull/128872
Approved by: https://github.com/zou3519
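For readers unfamiliar with the rule: setting `lint.flake8-tidy-imports.ban-relative-imports="all"` makes ruff's TID checks flag every relative import, and `--fix` rewrites each one to its fully qualified absolute form. Below is a minimal sketch of the kind of change this produces, using the `torchgen/api/types` package hunk further down as the (assumed) example; the `isort:skip` to `usort:skip` pragma change visible in the diff is presumably a separate tweak, not part of the ruff fix.

```python
# Before (relative imports inside the torchgen.api.types package):
#     from .types import *
#     from .types_base import *
#     from .signatures import *  # isort:skip

# After `ruff check --fix --select=TID` with ban-relative-imports="all":
# every import is spelled out from the package root.
from torchgen.api.types.types import *
from torchgen.api.types.types_base import *
from torchgen.api.types.signatures import *  # usort:skip
```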
Committed by: PyTorch MergeBot
Parent: e1c1052829
Commit: b697808056
@@ -3,7 +3,6 @@ from dataclasses import dataclass
from typing import cast, Dict, List, Match, Optional, Sequence, Set, Tuple
from torchgen import local
from torchgen.api import cpp
from torchgen.api.types import BaseCType, Binding, NamedCType, tensorListT
from torchgen.model import (

@@ -48,6 +48,7 @@ from torchgen.model import (
)
from torchgen.utils import assert_never
# This file describes the translation of JIT schema to the public C++
# API, which is what people use when they call functions like at::add.
#

@@ -2,7 +2,6 @@ import itertools
from typing import List, Sequence, Union
from torchgen.api import cpp
from torchgen.api.types import ArgName, Binding, CType, NamedCType
from torchgen.model import (
    Argument,

@@ -14,6 +13,7 @@ from torchgen.model import (
)
from torchgen.utils import assert_never, concatMap
# This file describes the translation of JIT schema to the dispatcher
# API, the *unboxed* calling convention by which invocations through
# the dispatcher are made. Historically, the dispatcher API matched

@@ -20,7 +20,6 @@ from torchgen.api.types import (
    SymIntT,
    VectorCType,
)
from torchgen.model import (
    Argument,
    BaseTy,
@@ -1,5 +1,6 @@
from torchgen.model import NativeFunctionsGroup
# Follows dispatcher calling convention, but:
# - Mutable arguments not allowed. Meta functions are always
# written in functional form. Look at FunctionSchema.signature()

@@ -2,7 +2,6 @@ from typing import List, Optional, Sequence, Union
from torchgen import local
from torchgen.api import cpp
from torchgen.api.types import (
    ArgName,
    BaseCType,

@@ -30,6 +29,7 @@ from torchgen.model import (
)
from torchgen.utils import assert_never
# This file describes the translation of JIT schema to the native functions API.
# This looks a lot like the C++ API (which makes historical sense, because the
# idea was you wrote native functions to implement functions in the C++ API),

@@ -17,6 +17,7 @@ from torchgen.model import (
    Variant,
)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
#
# Data Models

@@ -1,7 +1,6 @@
from typing import List, Union
from torchgen.api import cpp
from torchgen.api.types import (
    ArgName,
    ArrayRefCType,

@@ -33,6 +32,7 @@ from torchgen.model import (
)
from torchgen.utils import assert_never
# This file describes the translation of JIT schema to the structured functions API.
# This is similar to native API, but a number of historical problems with native
# API have been fixed.

@@ -33,6 +33,7 @@ from torchgen.api.types import (
    VectorCType,
)
# This file implements a small program synthesis engine that implements
# conversions between one API to another.
#
@@ -1,3 +1,3 @@
from .types import *
from .types_base import *
from .signatures import *  # isort:skip
from torchgen.api.types.types import *
from torchgen.api.types.types_base import *
from torchgen.api.types.signatures import *  # usort:skip

@@ -1,7 +1,7 @@
from dataclasses import dataclass
from typing import Iterator, List, Optional, Sequence, Set, Tuple, Union
from torchgen.api.types.types_base import Binding, CType, Expr
from torchgen.model import (
    BackendIndex,
    FunctionSchema,

@@ -10,8 +10,6 @@ from torchgen.model import (
    NativeFunctionsViewGroup,
)
from .types_base import Binding, CType, Expr
@dataclass(frozen=True)
class CppSignature:

@@ -15,9 +15,7 @@ Add new types to `types_base.py` if they are basic and not attached to ATen/c10.
from dataclasses import dataclass
from typing import Dict
from torchgen.model import BaseTy, ScalarType
from .types_base import (
from torchgen.api.types.types_base import (
    BaseCppType,
    BaseCType,
    boolT,

@@ -30,6 +28,7 @@ from .types_base import (
    longT,
    shortT,
)
from torchgen.model import BaseTy, ScalarType
TENSOR_LIST_LIKE_CTYPES = [
@@ -19,6 +19,7 @@ from typing import List, Optional, Union
from torchgen.model import Argument, SelfArgument, TensorOptionsArguments
# An ArgName is just the str name of the argument in schema;
# but in some special circumstances, we may add a little extra
# context. The Enum SpecialArgName covers all of these cases;

@@ -2,7 +2,6 @@ from dataclasses import dataclass
from typing import List, Optional
import torchgen.api.types as api_types
from torchgen.api import cpp, structured
from torchgen.api.types import (
    ArgName,

@@ -12,6 +12,7 @@ from torchgen.model import (
    Type,
)
# This file generates the code for unboxing wrappers, i.e., the glue logic to unbox a boxed operator and convert the
# ivalues from stack to correct arguments to the unboxed kernel, based on corresponding JIT schema. This codegen is
# an alternative way to generate unboxing wrappers similar to the existing C++ metaprogramming approach but gets the

@@ -1,6 +1,7 @@
import re
from typing import Mapping, Match, Optional, Sequence
# match $identifier or ${identifier} and replace with value in env
# If this identifier is at the beginning of whitespace on a line
# and its value is a list then it is treated as

@@ -1,5 +1,4 @@
import contextlib
import functools
from typing import Any, Callable, Dict, Iterator, List, Optional, Tuple, TypeVar, Union

@@ -13,6 +12,7 @@ from torchgen.model import (
)
from torchgen.utils import context, S, T
# Helper functions for defining generators on things in the model
F = TypeVar(

@@ -4,6 +4,7 @@ from pathlib import Path
from torch.jit._decompositions import decomposition_table
# from torchgen.code_template import CodeTemplate
DECOMP_HEADER = r"""
@@ -1,18 +1,18 @@
from .lazy_ir import (
from torchgen.dest.lazy_ir import (
    generate_non_native_lazy_ir_nodes as generate_non_native_lazy_ir_nodes,
    GenLazyIR as GenLazyIR,
    GenLazyNativeFuncDefinition as GenLazyNativeFuncDefinition,
    GenLazyShapeInferenceDefinition as GenLazyShapeInferenceDefinition,
)
from .native_functions import (
from torchgen.dest.native_functions import (
    compute_native_function_declaration as compute_native_function_declaration,
)
from .register_dispatch_key import (
from torchgen.dest.register_dispatch_key import (
    gen_registration_headers as gen_registration_headers,
    gen_registration_helpers as gen_registration_helpers,
    RegisterDispatchKey as RegisterDispatchKey,
)
from .ufunc import (
from torchgen.dest.ufunc import (
    compute_ufunc_cpu as compute_ufunc_cpu,
    compute_ufunc_cpu_kernel as compute_ufunc_cpu_kernel,
    compute_ufunc_cuda as compute_ufunc_cuda,

@@ -3,7 +3,6 @@ from typing import List, Optional, Union
import torchgen.api.meta as meta
import torchgen.api.structured as structured
from torchgen.api.types import kernel_signature
from torchgen.context import with_native_function_and_index
from torchgen.model import BackendIndex, NativeFunction, NativeFunctionsGroup
from torchgen.utils import mapMaybe

@@ -27,6 +27,7 @@ from torchgen.model import (
)
from torchgen.utils import OrderedSet
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
#
# CUDA STUFF
@@ -1,12 +1,11 @@
from collections import defaultdict
from dataclasses import dataclass
from typing import Dict, List, Optional, Sequence, Tuple
from torchgen import dest
# disable import sorting to avoid circular dependency.
from torchgen.api.types import DispatcherSignature  # isort:skip
from torchgen.api.types import DispatcherSignature  # usort:skip
from torchgen.context import method_with_native_function
from torchgen.executorch.model import ETKernelIndex
from torchgen.model import BaseTy, BaseType, DispatchKey, NativeFunction, Variant

@@ -15,6 +15,14 @@ from torchgen.api.types import (
    VectorCType,
    voidT,
)
from torchgen.executorch.api.types import (
    ArrayRefCType,
    BaseTypeToCppMapping,
    OptionalCType,
    scalarT,
    tensorListT,
    tensorT,
)
from torchgen.model import (
    Argument,
    Arguments,

@@ -29,14 +37,7 @@ from torchgen.model import (
    Type,
)
from torchgen.utils import assert_never
from .types import (
    ArrayRefCType,
    BaseTypeToCppMapping,
    OptionalCType,
    scalarT,
    tensorListT,
    tensorT,
)
"""
This file describes the translation of JIT schema to the public C++ API, which is what people use when they call

@@ -1,2 +1,2 @@
from .types import *
from .signatures import *  # isort:skip
from torchgen.executorch.api.types.types import *
from torchgen.executorch.api.types.signatures import *  # usort:skip
@@ -2,12 +2,10 @@ from dataclasses import dataclass
from typing import List, Optional, Set
import torchgen.api.cpp as aten_cpp
from torchgen.api.types import Binding, CType
from torchgen.executorch.api.types.types import contextArg
from torchgen.model import FunctionSchema, NativeFunction
from .types import contextArg
@dataclass(frozen=True)
class ExecutorchCppSignature:

@@ -15,6 +15,7 @@ from torchgen.api.types import (
)
from torchgen.model import BaseTy
halfT = BaseCppType("torch::executor", "Half")
bfloat16T = BaseCppType("torch::executor", "BFloat16")
stringT = BaseCppType("torch::executor", "string_view")

@@ -12,6 +12,7 @@ from torchgen.model import (
    Type,
)
connector = "\n\t"

@@ -17,6 +17,7 @@ from torchgen.model import (
)
from torchgen.utils import assert_never
KERNEL_KEY_VERSION = 1

@@ -4,7 +4,6 @@ from typing import Any, Dict, List, Optional, Set, Tuple
import yaml
from torchgen.executorch.model import ETKernelIndex, ETKernelKey
from torchgen.gen import LineLoader, parse_native_yaml
from torchgen.model import (
    BackendMetadata,

@@ -15,6 +14,7 @@ from torchgen.model import (
)
from torchgen.utils import NamespaceHelper
# Parse native_functions.yaml into a sequence of NativeFunctions and ET Backend Indices.
ETParsedYaml = namedtuple("ETParsedYaml", ["native_functions", "et_kernel_indices"])

@@ -4,6 +4,7 @@ import os
from torch._inductor import pattern_matcher
from torch._inductor.fx_passes import joint_graph
if __name__ == "__main__":
    # Start by deleting all the existing patterns.
    for file in os.listdir(pattern_matcher.SERIALIZED_PATTERN_PATH):
@@ -3,7 +3,6 @@ import functools
import json
import os
import pathlib
from collections import defaultdict, namedtuple, OrderedDict
from dataclasses import dataclass, field
from typing import (

@@ -27,7 +26,6 @@ import torchgen.api.meta as meta
import torchgen.api.native as native
import torchgen.api.structured as structured
import torchgen.dest as dest
from torchgen.aoti.fallback_ops import inductor_fallback_ops
from torchgen.api import cpp
from torchgen.api.translate import translate

@@ -59,7 +57,6 @@ from torchgen.gen_functionalization_type import (
    GenCompositeViewCopyKernel,
)
from torchgen.gen_vmap_plumbing import gen_all_vmap_plumbing
from torchgen.model import (
    Argument,
    BackendIndex,

@@ -105,6 +102,7 @@ from torchgen.utils import (
)
from torchgen.yaml_utils import YamlDumper, YamlLoader
T = TypeVar("T")
# Welcome to the ATen code generator v2! The ATen code generator is

@@ -4,7 +4,6 @@ from typing import Dict, List, Optional, Sequence, Tuple, Union
from torchgen.api.types import DispatcherSignature
from torchgen.api.types.signatures import CppSignature, CppSignatureGroup
from torchgen.context import method_with_native_function
from torchgen.model import (
    Argument,

@@ -22,6 +21,7 @@ from torchgen.model import (
)
from torchgen.utils import mapMaybe
base_type_to_c_type = {
    BaseTy.Tensor: "AtenTensorHandle",
    BaseTy.bool: "int32_t",  # Use int to pass bool

@@ -46,7 +46,6 @@ from torchgen.native_function_generation import (
    MUTABLE_OPS_THAT_CANNOT_GET_AN_OUT_VARIANT,
    OUT_OPS_THAT_DONT_GET_GROUPED_PROPERLY,
)
from torchgen.selective_build.selector import SelectiveBuilder
from torchgen.utils import dataclass_repr
@@ -18,22 +18,21 @@ from typing import (
import yaml
import torchgen.dest as dest
from torchgen.api.lazy import setValueT
from torchgen.api.types import BaseCppType
from torchgen.dest.lazy_ir import GenLazyIR, GenLazyNativeFuncDefinition, GenTSLazyIR
from torchgen.gen import get_grouped_native_functions, parse_native_yaml
from torchgen.model import NativeFunction, NativeFunctionsGroup, OperatorName
from torchgen.selective_build.selector import SelectiveBuilder
from torchgen.utils import FileManager, NamespaceHelper
from torchgen.yaml_utils import YamlLoader
from .gen_backend_stubs import (
from torchgen.gen_backend_stubs import (
    error_on_missing_kernels,
    gen_dispatcher_registrations,
    gen_dispatchkey_nativefunc_headers,
    parse_backend_yaml,
)
from torchgen.model import NativeFunction, NativeFunctionsGroup, OperatorName
from torchgen.selective_build.selector import SelectiveBuilder
from torchgen.utils import FileManager, NamespaceHelper
from torchgen.yaml_utils import YamlLoader
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
#

@@ -2,6 +2,7 @@ import threading
from contextlib import contextmanager
from typing import Iterator, Optional
# Simple dynamic scoping implementation. The name "parametrize" comes
# from Racket.
#
@@ -1,13 +1,13 @@
import dataclasses
import itertools
import re
from dataclasses import dataclass
from enum import auto, Enum
from typing import Callable, Dict, Iterator, List, Optional, Sequence, Set, Tuple, Union
from torchgen.utils import assert_never, NamespaceHelper, OrderedSet
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
#
# DATA MODEL

@@ -1,5 +1,4 @@
from collections import defaultdict
from typing import Dict, List, Optional, Sequence, Tuple, Union
import torchgen.api.dispatcher as dispatcher

@@ -27,6 +26,7 @@ from torchgen.model import (
)
from torchgen.utils import concatMap
# See Note: [Out ops with functional variants that don't get grouped properly]
OUT_OPS_THAT_DONT_GET_GROUPED_PROPERLY = [
    # This has a functional variant, but it's currently marked private.

@@ -7,7 +7,6 @@ from typing import Any, Dict, List
import torch
from torch.jit.generate_bytecode import generate_upgraders_bytecode
from torchgen.code_template import CodeTemplate
from torchgen.operator_versions.gen_mobile_upgraders_constant import (
    MOBILE_UPGRADERS_HEADER_DESCRIPTION,

@@ -10,6 +10,7 @@ from torchgen.context import native_function_manager
from torchgen.model import DispatchKey, NativeFunctionsGroup, NativeFunctionsViewGroup
from torchgen.static_runtime import config, generator
# Given a list of `grouped_native_functions` sorted by their op names, return a list of
# lists each of which groups ops that share the base name. For example, `mean` and
# `mean.dim` are grouped together by this function.

@@ -1,6 +1,5 @@
import json
import logging
import math
from typing import Dict, List, Optional, Sequence, Tuple, Union

@@ -21,6 +20,7 @@ from torchgen.model import (
)
from torchgen.static_runtime import config
logger: logging.Logger = logging.getLogger()