Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/76275

In preparation for addressing https://github.com/pytorch/pytorch/issues/73212

Diff was generated with:

```
git mv tools/codegen torchgen
git grep -l 'tools.codegen' | xargs sed -i 's/tools.codegen/torchgen/g'
sed -i "s/\${TOOLS_PATH}\/codegen/\${TORCH_ROOT}\/torchgen/g" caffe2/CMakeLists.txt
```

and manual edits to:

* tools/test/test_gen_backend_stubs.py
* torchgen/build.bzl
* torchgen/gen_backend_stubs.py

aka this diff:

```
diff --git a/tools/test/test_gen_backend_stubs.py b/tools/test/test_gen_backend_stubs.py
index 3dc26c6d2d..104054575e 100644
--- a/tools/test/test_gen_backend_stubs.py
+++ b/tools/test/test_gen_backend_stubs.py
@@ -9,7 +9,7 @@ from torchgen.gen_backend_stubs import run
 from torchgen.gen import _GLOBAL_PARSE_NATIVE_YAML_CACHE  # noqa: F401

 path = os.path.dirname(os.path.realpath(__file__))
-gen_backend_stubs_path = os.path.join(path, '../torchgen/gen_backend_stubs.py')
+gen_backend_stubs_path = os.path.join(path, '../../torchgen/gen_backend_stubs.py')

 # gen_backend_stubs.py is an integration point that is called directly by external backends.
 # The tests here are to confirm that badly formed inputs result in reasonable error messages.

diff --git a/torchgen/build.bzl b/torchgen/build.bzl
index ed04e35a43..d00078a3cf 100644
--- a/torchgen/build.bzl
+++ b/torchgen/build.bzl
@@ -1,6 +1,6 @@
 def define_targets(rules):
     rules.py_library(
-        name = "codegen",
+        name = "torchgen",
         srcs = rules.glob(["**/*.py"]),
         deps = [
             rules.requirement("PyYAML"),
@@ -11,6 +11,6 @@ def define_targets(rules):

     rules.py_binary(
         name = "gen",
-        srcs = [":codegen"],
+        srcs = [":torchgen"],
         visibility = ["//visibility:public"],
     )

diff --git a/torchgen/gen_backend_stubs.py b/torchgen/gen_backend_stubs.py
index c1a672a655..beee7a15e0 100644
--- a/torchgen/gen_backend_stubs.py
+++ b/torchgen/gen_backend_stubs.py
@@ -474,7 +474,7 @@ def run(
 ) -> None:

     # Assumes that this file lives at PYTORCH_ROOT/torchgen/gen_backend_stubs.py
-    pytorch_root = pathlib.Path(__file__).parent.parent.parent.absolute()
+    pytorch_root = pathlib.Path(__file__).parent.parent.absolute()
     template_dir = os.path.join(pytorch_root, "aten/src/ATen/templates")

     def make_file_manager(install_dir: str) -> FileManager:
```

run_all_fbandroid_tests

Test Plan: sandcastle

Reviewed By: albanD, ngimel

Differential Revision: D35770317

fbshipit-source-id: 153ac4a7fef15b1e750812a90bfafdbc8f1ebcdf
(cherry picked from commit c6d485d1d4648fa1c8a4c14c5bf3d8e899b9b4dd)
from torchgen.model import (
    Argument,
    FunctionSchema,
    Return,
    SelfArgument,
    TensorOptionsArguments,
    Type,
)

from torchgen.api.types import (
    ArgName,
    BaseCType,
    Binding,
    ConstRefCType,
    NamedCType,
    CType,
    MutRefCType,
    ListCType,
    OptionalCType,
    tensorT,
    scalarT,
    layoutT,
    deviceT,
    boolT,
    scalarTypeT,
)

from torchgen.api import cpp
from torchgen import local
from torchgen.utils import assert_never

from typing import Union, Sequence, List, Optional

# This file describes the translation of JIT schema to the native functions API.
# This looks a lot like the C++ API (which makes historical sense, because the
# idea was you wrote native functions to implement functions in the C++ API),
# but over time we have evolved the C++ API without actually changing our
# native:: kernels. The intention is to make native API and dispatcher API
# line up as closely as possible, since this results in the least overhead
# (no translation is needed from dispatcher API to native API).


def name(func: FunctionSchema) -> str:
    name = str(func.name.name)
    # TODO: delete this!
    if func.is_out_fn():
        name += "_out"
    if func.name.overload_name:
        name += f"_{func.name.overload_name}"
    return name

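
# Hedged illustration, not part of the original file: for a hypothetical schema
# whose base name is "foo", which is an out variant, and whose overload name is
# "vec", name() returns "foo_out_vec": the base name, then the "_out" suffix,
# then the overload name.
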
def argumenttype_type(t: Type, *, mutable: bool, binds: ArgName) -> NamedCType:
    if str(t) == "Tensor?":
        tensor_type: OptionalCType = OptionalCType(BaseCType(tensorT))
        if mutable and not local.use_const_ref_for_mutable_tensors():
            return NamedCType(binds, MutRefCType(tensor_type))
        else:
            return NamedCType(binds, ConstRefCType(tensor_type))
    elif str(t) == "Tensor?[]":
        return NamedCType(
            binds, ConstRefCType(ListCType(OptionalCType(BaseCType(tensorT))))
        )
    elif str(t) == "Scalar":
        return NamedCType(binds, ConstRefCType(BaseCType(scalarT)))
    elif str(t) == "Scalar?":
        return NamedCType(binds, ConstRefCType(OptionalCType(BaseCType(scalarT))))
    return cpp.argumenttype_type(t, mutable=mutable, binds=binds)

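
# Hedged examples of the special cases above (the rendered C++ strings are
# approximations, not taken from the original file):
#   "Tensor?" (non-mutable) -> ConstRefCType(OptionalCType(BaseCType(tensorT))),
#       roughly `const c10::optional<Tensor> &`
#   "Scalar"                -> ConstRefCType(BaseCType(scalarT)),
#       roughly `const Scalar &`
# Any other type falls through to cpp.argumenttype_type(), so for those types
# the native API matches the C++ API.
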
def returns_type(rs: Sequence[Return]) -> CType:
    return cpp.returns_type(rs)


def argument_type(a: Argument, *, binds: ArgName) -> NamedCType:
    return argumenttype_type(a.type, mutable=a.is_write, binds=binds)


def argument(
    a: Union[Argument, SelfArgument, TensorOptionsArguments], *, is_out: bool
) -> List[Binding]:
    # Ideally, we NEVER default native functions. However, there are a number
    # of functions that call native:: directly and rely on the defaulting
    # existing. So for BC, we generate defaults for non-out variants (but not
    # for out variants, where it is impossible to generate an appropriate
    # default)
    should_default = not is_out
    if isinstance(a, Argument):
        default: Optional[str] = None
        if should_default and a.default is not None:
            default = cpp.default_expr(a.default, a.type)
        return [
            Binding(
                nctype=argument_type(a, binds=a.name),
                name=a.name,
                default=default,
                argument=a,
            )
        ]
    elif isinstance(a, SelfArgument):
        # Erase SelfArgument from the distinction
        return argument(a.argument, is_out=is_out)
    elif isinstance(a, TensorOptionsArguments):
        default = None
        if should_default:
            default = "{}"
        # TODO: Not sure why the arguments assigned here are for
        # TensorOptionsArguments and not the constituent pieces. It seems
        # to matter
        return [
            Binding(
                nctype=NamedCType("dtype", OptionalCType(BaseCType(scalarTypeT))),
                name="dtype",
                default=default,
                argument=a,
            ),
            Binding(
                nctype=NamedCType("layout", OptionalCType(BaseCType(layoutT))),
                name="layout",
                default=default,
                argument=a,
            ),
            Binding(
                nctype=NamedCType("device", OptionalCType(BaseCType(deviceT))),
                name="device",
                default=default,
                argument=a,
            ),
            Binding(
                nctype=NamedCType("pin_memory", OptionalCType(BaseCType(boolT))),
                name="pin_memory",
                default=default,
                argument=a,
            ),
        ]
    else:
        assert_never(a)

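
# Hedged note, not part of the original file: a TensorOptionsArguments group
# expands into four optional-typed bindings (dtype, layout, device, pin_memory),
# and for non-out variants each of them is defaulted to "{}" so that existing
# code calling native:: kernels directly keeps compiling.
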
def arguments(func: FunctionSchema) -> List[Binding]:
    args: List[Union[Argument, TensorOptionsArguments, SelfArgument]] = []
    args.extend(func.arguments.non_out)
    args.extend(func.arguments.out)
    return [r for arg in args for r in argument(arg, is_out=func.is_out_fn())]
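

# A minimal usage sketch, not part of the original module. It assumes
# FunctionSchema.parse() accepts a schema string (as torchgen's generators do)
# and that NamedCType exposes cpp_type(); "demo_op" and its signature are made
# up for illustration only.
if __name__ == "__main__":
    demo = FunctionSchema.parse("demo_op(Tensor self, Scalar? alpha=1) -> Tensor")
    for binding in arguments(demo):
        # Print each native-API parameter name with its (approximate) C++ type.
        print(binding.name, binding.nctype.cpp_type())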