Rename tools/codegen to torchgen (#76275)

Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/76275

In preparation for addressing
https://github.com/pytorch/pytorch/issues/73212

Diff was generated with:

```
git mv tools/codegen torchgen
git grep -l 'tools.codegen' | xargs sed -i 's/tools.codegen/torchgen/g'
sed -i "s/\${TOOLS_PATH}\/codegen/\${TORCH_ROOT}\/torchgen/g" caffe2/CMakeLists.txt
```

and manual edits to:

* tools/test/test_gen_backend_stubs.py
* torchgen/build.bzl
* torchgen/gen_backend_stubs.py

aka this diff:

```
 diff --git a/tools/test/test_gen_backend_stubs.py b/tools/test/test_gen_backend_stubs.py
index 3dc26c6d2d..104054575e 100644
 --- a/tools/test/test_gen_backend_stubs.py
+++ b/tools/test/test_gen_backend_stubs.py
@@ -9,7 +9,7 @@ from torchgen.gen_backend_stubs import run
 from torchgen.gen import _GLOBAL_PARSE_NATIVE_YAML_CACHE  # noqa: F401

 path = os.path.dirname(os.path.realpath(__file__))
-gen_backend_stubs_path = os.path.join(path, '../torchgen/gen_backend_stubs.py')
+gen_backend_stubs_path = os.path.join(path, '../../torchgen/gen_backend_stubs.py')

 # gen_backend_stubs.py is an integration point that is called directly by external backends.
 # The tests here are to confirm that badly formed inputs result in reasonable error messages.
 diff --git a/torchgen/build.bzl b/torchgen/build.bzl
index ed04e35a43..d00078a3cf 100644
 --- a/torchgen/build.bzl
+++ b/torchgen/build.bzl
@@ -1,6 +1,6 @@
 def define_targets(rules):
     rules.py_library(
-        name = "codegen",
+        name = "torchgen",
         srcs = rules.glob(["**/*.py"]),
         deps = [
             rules.requirement("PyYAML"),
@@ -11,6 +11,6 @@ def define_targets(rules):

     rules.py_binary(
         name = "gen",
-        srcs = [":codegen"],
+        srcs = [":torchgen"],
         visibility = ["//visibility:public"],
     )
 diff --git a/torchgen/gen_backend_stubs.py b/torchgen/gen_backend_stubs.py
index c1a672a655..beee7a15e0 100644
 --- a/torchgen/gen_backend_stubs.py
+++ b/torchgen/gen_backend_stubs.py
@@ -474,7 +474,7 @@ def run(
 ) -> None:

     # Assumes that this file lives at PYTORCH_ROOT/torchgen/gen_backend_stubs.py
-    pytorch_root = pathlib.Path(__file__).parent.parent.parent.absolute()
+    pytorch_root = pathlib.Path(__file__).parent.parent.absolute()
     template_dir = os.path.join(pytorch_root, "aten/src/ATen/templates")

     def make_file_manager(install_dir: str) -> FileManager:
```

run_all_fbandroid_tests

Test Plan: sandcastle

Reviewed By: albanD, ngimel

Differential Revision: D35770317

fbshipit-source-id: 153ac4a7fef15b1e750812a90bfafdbc8f1ebcdf
(cherry picked from commit c6d485d1d4648fa1c8a4c14c5bf3d8e899b9b4dd)
This commit is contained in:
Edward Yang
2022-04-24 18:32:01 -07:00
committed by PyTorch MergeBot
parent 8d31706b9e
commit 36420b5e8c
85 changed files with 271 additions and 271 deletions

View File

@ -56,7 +56,7 @@ sudo apt-get -y install doxygen
# Generate ATen files
pushd "${pt_checkout}"
pip install -r requirements.txt
time python -m tools.codegen.gen \
time python -m torchgen.gen \
-s aten/src/ATen \
-d build/aten/src/ATen

View File

@ -26,7 +26,7 @@ set -x
rm -rf "$OUT"
# aten codegen
python -m tools.codegen.gen \
python -m torchgen.gen \
-d "$OUT"/torch/share/ATen
# torch codegen

View File

@ -95,7 +95,7 @@ generate_aten(
aten_ufunc_generated_cuda_sources("aten/src/ATen/{}") +
["aten/src/ATen/Declarations.yaml"]
),
generator = "//tools/codegen:gen",
generator = "//torchgen:gen",
)
libtorch_cpp_generated_sources = [
@ -1345,7 +1345,7 @@ cc_library(
py_binary(
name = "gen_op",
srcs = ["caffe2/contrib/aten/gen_op.py"],
deps = ["//tools/codegen"],
deps = ["//torchgen"],
)
genrule(

View File

@ -4,7 +4,7 @@ import argparse
import glob
import sys
import os
from tools.codegen.code_template import CodeTemplate
from torchgen.code_template import CodeTemplate
H_NAME = "glsl.h"
CPP_NAME = "glsl.cpp"

View File

@ -6,7 +6,7 @@ import glob
import os
import sys
import subprocess
from tools.codegen.code_template import CodeTemplate
from torchgen.code_template import CodeTemplate
H_NAME = "spv.h"
CPP_NAME = "spv.cpp"

View File

@ -291,7 +291,7 @@ If two backends have the same dispatch function, you can write `CPU, CUDA: func`
to reuse the same function name in both cases.
Available backend options can be found by searching `dispatch_keys` in
[codegen](https://github.com/pytorch/pytorch/blob/master/tools/codegen/gen.py).
[codegen](https://github.com/pytorch/pytorch/blob/master/torchgen/gen.py).
There are also two special "generic" backends:
- `CompositeExplicitAutograd` (previously known as `DefaultBackend`):

View File

@ -97,7 +97,7 @@ enum class BackendComponent : uint8_t {
// See Note [DispatchKeySet Internal Representation] for more details.
//
// NOTE: Keep the list in sync with `DispatchKey` in tools/codegen/model.py
// NOTE: Keep the list in sync with `DispatchKey` in torchgen/model.py
enum class DispatchKey : uint16_t {
// ~~~~~~~~~~~~~~~~~~~~~~~~~~ UNDEFINED ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ //

View File

@ -63,7 +63,7 @@ if(INTERN_BUILD_ATEN_OPS)
set(CMAKE_POSITION_INDEPENDENT_CODE ${__caffe2_CMAKE_POSITION_INDEPENDENT_CODE})
# Generate the headers wrapped by our operator
file(GLOB_RECURSE all_python "${PROJECT_SOURCE_DIR}/tools/codegen/*.py")
file(GLOB_RECURSE all_python "${PROJECT_SOURCE_DIR}/torchgen/*.py")
add_custom_command(OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/contrib/aten/aten_op.h
COMMAND
"${PYTHON_EXECUTABLE}" ${CMAKE_CURRENT_SOURCE_DIR}/contrib/aten/gen_op.py
@ -458,10 +458,10 @@ if(NOT INTERN_BUILD_MOBILE OR NOT BUILD_CAFFE2_MOBILE)
"${TOOLS_PATH}/autograd/gen_variable_type.py"
"${TOOLS_PATH}/autograd/gen_inplace_or_view_type.py"
"${TOOLS_PATH}/autograd/load_derivatives.py"
"${TOOLS_PATH}/codegen/gen_backend_stubs.py"
"${TOOLS_PATH}/codegen/gen_lazy_tensor.py"
"${TOOLS_PATH}/codegen/api/lazy.py"
"${TOOLS_PATH}/codegen/dest/lazy_ir.py"
"${TORCH_ROOT}/torchgen/gen_backend_stubs.py"
"${TORCH_ROOT}/torchgen/gen_lazy_tensor.py"
"${TORCH_ROOT}/torchgen/api/lazy.py"
"${TORCH_ROOT}/torchgen/dest/lazy_ir.py"
WORKING_DIRECTORY "${TORCH_ROOT}")

View File

@ -37,9 +37,9 @@ if args.aten_root:
raise ValueError('aten_root ({}) does not exist'.format(
args.aten_root))
sys.path.insert(0, os.path.join(args.aten_root, '..'))
from tools.codegen.code_template import CodeTemplate as CT
from torchgen.code_template import CodeTemplate as CT
else:
from tools.codegen.code_template import CodeTemplate as CT
from torchgen.code_template import CodeTemplate as CT
OP_TEMPLATE = CT.from_file(
os.path.join(args.template_dir, 'aten_op_template.h'))

View File

@ -67,7 +67,7 @@ if(INTERN_BUILD_ATEN_OPS)
set_source_files_properties(${CMAKE_CURRENT_LIST_DIR}/../aten/src/ATen/MapAllocator.cpp PROPERTIES COMPILE_FLAGS "-fno-openmp")
endif()
file(GLOB_RECURSE all_python "${CMAKE_CURRENT_LIST_DIR}/../tools/codegen/*.py")
file(GLOB_RECURSE all_python "${CMAKE_CURRENT_LIST_DIR}/../torchgen/*.py")
set(GEN_ROCM_FLAG)
if(USE_ROCM)
@ -148,7 +148,7 @@ if(INTERN_BUILD_ATEN_OPS)
endif()
set(GEN_COMMAND
"${PYTHON_EXECUTABLE}" -m tools.codegen.gen
"${PYTHON_EXECUTABLE}" -m torchgen.gen
--source-path ${CMAKE_CURRENT_LIST_DIR}/../aten/src/ATen
--install_dir ${CMAKE_BINARY_DIR}/aten/src/ATen
${GEN_PER_OPERATOR_FLAG}

View File

@ -16,7 +16,7 @@ pushd "$(dirname "$0")/../../.."
cp torch/_utils_internal.py tools/shared
python -m tools.codegen.gen
python -m torchgen.gen
python tools/setup_helpers/generate_code.py \
--native-functions-path aten/src/ATen/native/native_functions.yaml

View File

@ -52,7 +52,7 @@ You will need to make sure that the entry is SORTED according to the version bum
fbcode/caffe2/torch/csrc/jit/mobile/upgrader_mobile.cpp
```
python pytorch/tools/codegen/operator_versions/gen_mobile_upgraders.py
python pytorch/torchgen/operator_versions/gen_mobile_upgraders.py
```
4. Generate the test to cover upgrader.

View File

@ -2,7 +2,7 @@
from torch.testing._internal.common_utils import TestCase, run_tests
from tools.codegen.operator_versions.gen_mobile_upgraders import (
from torchgen.operator_versions.gen_mobile_upgraders import (
sort_upgrader,
write_cpp,
)

View File

@ -9,6 +9,6 @@ def define_targets(rules):
visibility = ["//:__subpackages__"],
deps = [
rules.requirement("PyYAML"),
"//tools/codegen",
"//torchgen:torchgen",
],
)

View File

@ -1,6 +1,6 @@
from tools.codegen.api.autograd import NativeFunctionWithDifferentiabilityInfo as NFWDI
from tools.codegen.context import native_function_manager
from tools.codegen.utils import T
from torchgen.api.autograd import NativeFunctionWithDifferentiabilityInfo as NFWDI
from torchgen.context import native_function_manager
from torchgen.utils import T
import functools
from typing import Callable

View File

@ -20,11 +20,11 @@ import textwrap
from typing import Dict, List, Any
from tools.codegen.gen import parse_native_yaml
from tools.codegen.utils import FileManager
from tools.codegen.context import with_native_function
from tools.codegen.model import BaseOperatorName, NativeFunction
import tools.codegen.api.python as python
from torchgen.gen import parse_native_yaml
from torchgen.utils import FileManager
from torchgen.context import with_native_function
from torchgen.model import BaseOperatorName, NativeFunction
import torchgen.api.python as python
from .gen_python_functions import (
should_generate_py_binding,
is_py_torch_function,

View File

@ -24,13 +24,13 @@ torch/csrc/autograd/generated/
import argparse
import os
from tools.codegen.api import cpp
from tools.codegen.api.autograd import (
from torchgen.api import cpp
from torchgen.api.autograd import (
match_differentiability_info,
NativeFunctionWithDifferentiabilityInfo,
)
from tools.codegen.gen import parse_native_yaml
from tools.codegen.selective_build.selector import SelectiveBuilder
from torchgen.gen import parse_native_yaml
from torchgen.selective_build.selector import SelectiveBuilder
from typing import List
from . import gen_python_functions
from .gen_autograd_functions import (

View File

@ -8,14 +8,14 @@ from .gen_inplace_or_view_type import VIEW_FUNCTIONS
from typing import List, Sequence, Tuple
from tools.codegen.api.autograd import (
from torchgen.api.autograd import (
Derivative,
DifferentiabilityInfo,
SavedAttribute,
uses_retain_variables,
uses_single_grad,
)
from tools.codegen.api.types import (
from torchgen.api.types import (
Binding,
BaseCType,
OptionalCType,
@ -32,9 +32,9 @@ from tools.codegen.api.types import (
ArrayRefCType,
optionalIntArrayRefT,
)
from tools.codegen.code_template import CodeTemplate
from tools.codegen.utils import FileManager
from tools.codegen.model import Argument
from torchgen.code_template import CodeTemplate
from torchgen.utils import FileManager
from torchgen.model import Argument
FUNCTION_DECLARATION = CodeTemplate(
"""\

View File

@ -4,13 +4,13 @@
# if updates are needed in torch/csrc/autograd/autograd_not_implemented_fallback.cpp
# The fallback is expected to mimick this codegen, so we should keep the two in sync.
from tools.codegen.api import cpp
from tools.codegen.api.autograd import (
from torchgen.api import cpp
from torchgen.api.autograd import (
NativeFunctionWithDifferentiabilityInfo,
gen_differentiable_outputs,
dispatch_strategy,
)
from tools.codegen.api.types import (
from torchgen.api.types import (
Binding,
DispatcherSignature,
CType,
@ -21,9 +21,9 @@ from tools.codegen.api.types import (
intArrayRefT,
symIntArrayRefT,
)
from tools.codegen.code_template import CodeTemplate
from tools.codegen.context import with_native_function
from tools.codegen.model import (
from torchgen.code_template import CodeTemplate
from torchgen.context import with_native_function
from torchgen.model import (
Type,
NativeFunction,
SelfArgument,
@ -32,7 +32,7 @@ from tools.codegen.model import (
is_foreach_op,
)
from typing import List, Optional, Sequence, Tuple, Dict
from tools.codegen.utils import FileManager
from torchgen.utils import FileManager
from .context import with_native_function_with_differentiability_info
from .gen_trace_type import (
MANUAL_AUTOGRAD,

View File

@ -37,10 +37,10 @@ import yaml
from .gen_trace_type import should_trace
from tools.codegen.code_template import CodeTemplate
from tools.codegen.api import cpp
from tools.codegen.api.types import CppSignatureGroup
from tools.codegen.api.python import (
from torchgen.code_template import CodeTemplate
from torchgen.api import cpp
from torchgen.api.types import CppSignatureGroup
from torchgen.api.python import (
PythonArgument,
PythonSignature,
PythonSignatureDeprecated,
@ -57,16 +57,16 @@ from tools.codegen.api.python import (
namedtuple_fieldnames,
signature,
)
from tools.codegen.gen import cpp_string, parse_native_yaml
from tools.codegen.context import with_native_function
from tools.codegen.model import (
from torchgen.gen import cpp_string, parse_native_yaml
from torchgen.context import with_native_function
from torchgen.model import (
Argument,
BaseOperatorName,
NativeFunction,
Type,
Variant,
)
from tools.codegen.utils import split_name_params, YamlLoader, FileManager
from torchgen.utils import split_name_params, YamlLoader, FileManager
from typing import Dict, Optional, List, Tuple, Set, Sequence, Callable

View File

@ -1,12 +1,12 @@
import itertools
from typing import List, Sequence, Union, Dict
from tools.codegen.api.types import DispatcherSignature
from tools.codegen.api import cpp
from tools.codegen.code_template import CodeTemplate
from tools.codegen.context import with_native_function
from tools.codegen.utils import FileManager
from tools.codegen.model import (
from torchgen.api.types import DispatcherSignature
from torchgen.api import cpp
from torchgen.code_template import CodeTemplate
from torchgen.context import with_native_function
from torchgen.utils import FileManager
from torchgen.model import (
Argument,
NativeFunction,
SchemaKind,

View File

@ -5,13 +5,13 @@
import re
from typing import Optional, List
from tools.codegen.api.types import CppSignatureGroup
from tools.codegen.api import cpp
import tools.codegen.api.python as python
from tools.codegen.gen import parse_native_yaml
from tools.codegen.context import with_native_function
from tools.codegen.utils import mapMaybe, FileManager
from tools.codegen.model import NativeFunction, TensorOptionsArguments, Variant
from torchgen.api.types import CppSignatureGroup
from torchgen.api import cpp
import torchgen.api.python as python
from torchgen.gen import parse_native_yaml
from torchgen.context import with_native_function
from torchgen.utils import mapMaybe, FileManager
from torchgen.model import NativeFunction, TensorOptionsArguments, Variant
OPTIONAL_TYPE_PATTERN = re.compile(r"c10::optional<(.+)>")
TYPE_PATTERN = re.compile(r"(?:const\s+)?([A-Z]\w+)")

View File

@ -52,7 +52,7 @@ from .gen_inplace_or_view_type import (
AUTOGRAD_NOT_IMPLEMENTED_REGISTRATION,
)
from tools.codegen.api.types import (
from torchgen.api.types import (
Binding,
DispatcherSignature,
BaseCType,
@ -68,7 +68,7 @@ from tools.codegen.api.types import (
TupleCType,
VectorCType,
)
from tools.codegen.api.autograd import (
from torchgen.api.autograd import (
DifferentiableInput,
NativeFunctionWithDifferentiabilityInfo,
SavedAttribute,
@ -76,11 +76,11 @@ from tools.codegen.api.autograd import (
gen_differentiable_outputs,
is_differentiable,
)
from tools.codegen.api import cpp
from tools.codegen.code_template import CodeTemplate
from tools.codegen.context import native_function_manager, with_native_function
from tools.codegen.utils import mapMaybe, FileManager
from tools.codegen.model import (
from torchgen.api import cpp
from torchgen.code_template import CodeTemplate
from torchgen.context import native_function_manager, with_native_function
from torchgen.utils import mapMaybe, FileManager
from torchgen.model import (
Argument,
NativeFunction,
SchemaKind,

View File

@ -1,19 +1,19 @@
# Parses derivatives.yaml into autograd functions
#
# Each autograd function is represented by `DifferentiabilityInfo` containing
# a list of `Derivative`. See `tools.codegen.api.autograd` for the data models.
# a list of `Derivative`. See `torchgen.api.autograd` for the data models.
from collections import defaultdict
import re
from typing import Counter, Sequence, Any, Tuple, List, Set, Dict, Match, Optional
import yaml
from tools.codegen.api.autograd import (
from torchgen.api.autograd import (
Derivative,
DifferentiabilityInfo,
SavedAttribute,
ForwardDerivative,
)
from tools.codegen.api.types import (
from torchgen.api.types import (
Binding,
CppSignatureGroup,
NamedCType,
@ -31,10 +31,10 @@ from tools.codegen.api.types import (
OptionalCType,
stringT,
)
from tools.codegen.api import cpp
from tools.codegen.gen import parse_native_yaml, get_grouped_by_view_native_functions
from tools.codegen.context import with_native_function
from tools.codegen.model import (
from torchgen.api import cpp
from torchgen.gen import parse_native_yaml, get_grouped_by_view_native_functions
from torchgen.context import with_native_function
from torchgen.model import (
FunctionSchema,
NativeFunction,
Variant,
@ -42,7 +42,7 @@ from tools.codegen.model import (
NativeFunctionsViewGroup,
OperatorName,
)
from tools.codegen.utils import IDENT_REGEX, split_name_params, YamlLoader, concatMap
from torchgen.utils import IDENT_REGEX, split_name_params, YamlLoader, concatMap
_GLOBAL_LOAD_DERIVATIVE_CACHE = {}

View File

@ -7,7 +7,7 @@ from functools import reduce
from typing import Set, List, Any
import yaml
from tools.codegen.selective_build.selector import (
from torchgen.selective_build.selector import (
combine_selective_builders,
SelectiveBuilder,
)

View File

@ -3,15 +3,15 @@ import argparse
import os
import pathlib
from dataclasses import dataclass
from tools.codegen.api import unboxing
from tools.codegen.api.translate import translate
from tools.codegen.api.types import CppSignatureGroup
from tools.codegen.api.unboxing import convert_arguments
from tools.codegen.context import method_with_native_function
from tools.codegen.gen import parse_native_yaml, cpp_string
from tools.codegen.model import NativeFunction, NativeFunctionsGroup, Variant
from tools.codegen.selective_build.selector import SelectiveBuilder
from tools.codegen.utils import Target, FileManager, mapMaybe, make_file_manager
from torchgen.api import unboxing
from torchgen.api.translate import translate
from torchgen.api.types import CppSignatureGroup
from torchgen.api.unboxing import convert_arguments
from torchgen.context import method_with_native_function
from torchgen.gen import parse_native_yaml, cpp_string
from torchgen.model import NativeFunction, NativeFunctionsGroup, Variant
from torchgen.selective_build.selector import SelectiveBuilder
from torchgen.utils import Target, FileManager, mapMaybe, make_file_manager
from typing import Union, Sequence
from typing_extensions import Literal

View File

@ -43,7 +43,7 @@ def run_autogen() -> None:
[
sys.executable,
"-m",
"tools.codegen.gen",
"torchgen.gen",
"-s",
"aten/src/ATen",
"-d",

View File

@ -2,8 +2,8 @@
import argparse
import os
from typing import Set
from tools.codegen.selective_build.selector import SelectiveBuilder
from tools.codegen.code_template import CodeTemplate
from torchgen.selective_build.selector import SelectiveBuilder
from torchgen.code_template import CodeTemplate
import yaml

View File

@ -2,14 +2,14 @@ import argparse
import collections
from pprint import pformat
from tools.codegen.model import Variant
from tools.codegen.api.python import (
from torchgen.model import Variant
from torchgen.api.python import (
PythonSignatureGroup,
PythonSignatureNativeFunctionPair,
returns_named_tuple_pyi,
)
from tools.codegen.gen import parse_native_yaml
from tools.codegen.utils import FileManager
from torchgen.gen import parse_native_yaml
from torchgen.utils import FileManager
from typing import Sequence, List, Dict
from tools.autograd.gen_python_functions import (

View File

@ -3,7 +3,7 @@ py_binary(
srcs = ["generate_code.py"],
deps = [
"//tools/autograd",
"//tools/codegen",
"//torchgen",
],
visibility = ["//:__pkg__"],
)

View File

@ -6,7 +6,7 @@ def define_targets(rules):
deps = [
rules.requirement("PyYAML"),
"//tools/autograd",
"//tools/codegen",
"//torchgen",
],
)

View File

@ -6,6 +6,6 @@ import sys
root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.insert(0, root)
import tools.codegen.gen
import torchgen.gen
tools.codegen.gen.main()
torchgen.gen.main()

View File

@ -38,7 +38,7 @@ def generate_code(
) -> None:
from tools.autograd.gen_autograd import gen_autograd, gen_autograd_python
from tools.autograd.gen_annotated_fn_args import gen_annotated
from tools.codegen.selective_build.selector import SelectiveBuilder
from torchgen.selective_build.selector import SelectiveBuilder
# Build ATen based Variable classes
if install_dir is None:
@ -98,7 +98,7 @@ def get_selector_from_legacy_operator_selection_list(
is_root_operator = True
is_used_for_training = True
from tools.codegen.selective_build.selector import SelectiveBuilder
from torchgen.selective_build.selector import SelectiveBuilder
selector = SelectiveBuilder.from_legacy_op_registration_allow_list(
selected_op_list,
@ -116,7 +116,7 @@ def get_selector(
# cwrap depends on pyyaml, so we can't import it earlier
root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.insert(0, root)
from tools.codegen.selective_build.selector import SelectiveBuilder
from torchgen.selective_build.selector import SelectiveBuilder
assert not (
selected_op_list_path is not None and operators_yaml_path is not None
@ -203,8 +203,8 @@ def main() -> None:
assert os.path.isfile(
ts_native_functions
), f"Unable to access {ts_native_functions}"
from tools.codegen.gen_lazy_tensor import run_gen_lazy_tensor
from tools.codegen.dest.lazy_ir import GenTSLazyIR
from torchgen.gen_lazy_tensor import run_gen_lazy_tensor
from torchgen.dest.lazy_ir import GenTSLazyIR
run_gen_lazy_tensor(
aten_path=aten_path,

View File

@ -4,12 +4,12 @@ import unittest
from tools.autograd import gen_autograd_functions
from tools.autograd import load_derivatives
import tools.codegen.model
import torchgen.model
class TestCreateDerivative(unittest.TestCase):
def test_named_grads(self) -> None:
schema = tools.codegen.model.FunctionSchema.parse(
schema = torchgen.model.FunctionSchema.parse(
"func(Tensor a, Tensor b) -> (Tensor x, Tensor y)"
)
native_function = dataclasses.replace(DEFAULT_NATIVE_FUNCTION, func=schema)
@ -24,7 +24,7 @@ class TestCreateDerivative(unittest.TestCase):
def test_non_differentiable_output(self) -> None:
specification = "func(Tensor a, Tensor b) -> (Tensor x, bool y, Tensor z)"
schema = tools.codegen.model.FunctionSchema.parse(specification)
schema = torchgen.model.FunctionSchema.parse(specification)
native_function = dataclasses.replace(DEFAULT_NATIVE_FUNCTION, func=schema)
differentiability_info = load_derivatives.create_differentiability_info(
@ -46,7 +46,7 @@ class TestCreateDerivative(unittest.TestCase):
)
def test_indexed_grads(self) -> None:
schema = tools.codegen.model.FunctionSchema.parse(
schema = torchgen.model.FunctionSchema.parse(
"func(Tensor a, Tensor b) -> (Tensor x, Tensor y)"
)
native_function = dataclasses.replace(DEFAULT_NATIVE_FUNCTION, func=schema)
@ -61,7 +61,7 @@ class TestCreateDerivative(unittest.TestCase):
def test_named_grads_and_indexed_grads(self) -> None:
specification = "func(Tensor a, Tensor b) -> (Tensor x, Tensor y)"
schema = tools.codegen.model.FunctionSchema.parse(specification)
schema = torchgen.model.FunctionSchema.parse(specification)
native_function = dataclasses.replace(DEFAULT_NATIVE_FUNCTION, func=schema)
with self.assertRaisesRegex(
@ -84,7 +84,7 @@ class TestCreateDerivative(unittest.TestCase):
class TestGenAutogradFunctions(unittest.TestCase):
def test_non_differentiable_output_invalid_type(self) -> None:
specification = "func(Tensor a, Tensor b) -> (Tensor x, bool y, Tensor z)"
schema = tools.codegen.model.FunctionSchema.parse(specification)
schema = torchgen.model.FunctionSchema.parse(specification)
native_function = dataclasses.replace(DEFAULT_NATIVE_FUNCTION, func=schema)
differentiability_info = load_derivatives.create_differentiability_info(
@ -107,7 +107,7 @@ class TestGenAutogradFunctions(unittest.TestCase):
def test_non_differentiable_output_output_differentiability(self) -> None:
specification = "func(Tensor a, Tensor b) -> (Tensor x, Tensor y, Tensor z)"
schema = tools.codegen.model.FunctionSchema.parse(specification)
schema = torchgen.model.FunctionSchema.parse(specification)
native_function = dataclasses.replace(DEFAULT_NATIVE_FUNCTION, func=schema)
differentiability_info = load_derivatives.create_differentiability_info(
@ -132,8 +132,8 @@ class TestGenAutogradFunctions(unittest.TestCase):
# Represents the most basic NativeFunction. Use dataclasses.replace()
# to edit for use.
DEFAULT_NATIVE_FUNCTION, _ = tools.codegen.model.NativeFunction.from_yaml(
{"func": "func() -> bool"}, loc=tools.codegen.model.Location(__file__, 1)
DEFAULT_NATIVE_FUNCTION, _ = torchgen.model.NativeFunction.from_yaml(
{"func": "func() -> bool"}, loc=torchgen.model.Location(__file__, 1)
)

View File

@ -5,10 +5,10 @@ import unittest
import yaml
import textwrap
from tools.codegen.model import NativeFunctionsGroup, DispatchKey
import tools.codegen.dest as dest
import tools.codegen.gen as gen
from tools.codegen.gen import LineLoader, parse_native_yaml_struct
from torchgen.model import NativeFunctionsGroup, DispatchKey
import torchgen.dest as dest
import torchgen.gen as gen
from torchgen.gen import LineLoader, parse_native_yaml_struct
class TestCodegenModel(expecttest.TestCase):

View File

@ -5,11 +5,11 @@ import tempfile
import unittest
import expecttest
from tools.codegen.gen_backend_stubs import run
from tools.codegen.gen import _GLOBAL_PARSE_NATIVE_YAML_CACHE # noqa: F401
from torchgen.gen_backend_stubs import run
from torchgen.gen import _GLOBAL_PARSE_NATIVE_YAML_CACHE # noqa: F401
path = os.path.dirname(os.path.realpath(__file__))
gen_backend_stubs_path = os.path.join(path, "../tools/codegen/gen_backend_stubs.py")
gen_backend_stubs_path = os.path.join(path, "../torchgen/gen_backend_stubs.py")
# gen_backend_stubs.py is an integration point that is called directly by external backends.
# The tests here are to confirm that badly formed inputs result in reasonable error messages.

View File

@ -145,7 +145,7 @@ When making changes to the operators, the first thing to identify is if it's BC/
5. After [rebuilding PyTorch](https://github.com/pytorch/pytorch#from-source), run the following command to auto update the file [`torch/csrc/jit/mobile/upgrader_mobile.cpp`](https://github.com/pytorch/pytorch/blob/8757e21c6a4fc00e83539aa7f9c28eb11eff53c1/torch/csrc/jit/mobile/upgrader_mobile.cpp). After rebuild PyTorch from source (`python setup.py`), run
```
python pytorch/tools/codegen/operator_versions/gen_mobile_upgraders.py
python pytorch/torchgen/operator_versions/gen_mobile_upgraders.py
```
6. Add a test. With the model generated from step 1, you will need to add tests in `test/test_save_load_for_op_versions.py`. Following is an example to write a test

View File

@ -3,7 +3,7 @@
* @generated
* This is an auto-generated file. Please do not modify it by hand.
* To re-generate, please run:
* cd ~/pytorch && python tools/codegen/decompositions/gen_jit_decompositions.py
* cd ~/pytorch && python torchgen/decompositions/gen_jit_decompositions.py
*/
#include <torch/csrc/jit/jit_log.h>
#include <torch/csrc/jit/passes/inliner.h>

View File

@ -1,7 +1,7 @@
#!/usr/bin/env python3
import argparse
from tools.codegen.gen import parse_native_yaml, FileManager
import tools.codegen.model as model
from torchgen.gen import parse_native_yaml, FileManager
import torchgen.model as model
def num_leading_spaces(line: str) -> int:
return len(line) - len(line.lstrip())

View File

@ -1,4 +1,4 @@
// @generated by tools/codegen/gen.py from
// @generated by torchgen/gen.py from
// external_functions_codegen_template.cpp
#include <torch/csrc/jit/tensorexpr/external_functions.h>

View File

@ -3,7 +3,7 @@ This folder contains generated sources for the lazy torchscript backend.
The main input file that drives which operators get codegen support for torchscript backend is
[../../../../aten/src/ATen/native/ts_native_functions.yaml](../../../../aten/src/ATen/native/ts_native_functions.yaml)
The code generator lives at `tools/codegen/gen_lazy_tensor.py`.
The code generator lives at `torchgen/gen_lazy_tensor.py`.
It is called automatically by the torch autograd codegen (`tools/setup_helpers/generate_code.py`)
as a part of the build process in OSS builds (CMake/Bazel) and Buck.
@ -12,7 +12,7 @@ External backends (e.g. torch/xla) call `gen_lazy_tensor.py` directly,
and feed it command line args indicating where the output files should go.
For more information on codegen, see these resources:
* Info about lazy tensor codegen: [gen_lazy_tensor.py docs](../../../../tools/codegen/gen_lazy_tensor.py)
* Info about lazy tensor codegen: [gen_lazy_tensor.py docs](../../../../torchgen/gen_lazy_tensor.py)
* Lazy TorchScript backend native functions: [ts_native_functions.yaml](../../../../aten/src/ATen/native/ts_native_functions.yaml)
* Source of truth for native func definitions [ATen native_functions.yaml](../../../../aten/src/ATen/native/native_functions.yaml)
* Info about native functions [ATen nativefunc README.md](../../../../aten/src/ATen/native/README.md)

View File

@ -2,15 +2,15 @@ from dataclasses import dataclass
import re
from typing import Optional, Sequence, Set, List, Tuple, Match
from tools.codegen.api import cpp
from tools.codegen.api.types import Binding, NamedCType
from tools.codegen.model import (
from torchgen.api import cpp
from torchgen.api.types import Binding, NamedCType
from torchgen.model import (
NativeFunction,
Type,
SchemaKind,
NativeFunctionsViewGroup,
)
from tools.codegen.utils import IDENT_REGEX
from torchgen.utils import IDENT_REGEX
# Represents a saved attribute involved in backward calculation.
# Note that it can be a derived property of an input argument, e.g.:

View File

@ -1,4 +1,4 @@
from tools.codegen.model import (
from torchgen.model import (
Argument,
Arguments,
BaseTy,
@ -12,7 +12,7 @@ from tools.codegen.model import (
TensorOptionsArguments,
Type,
)
from tools.codegen.api.types import (
from torchgen.api.types import (
ArgName,
BaseCType,
Binding,
@ -40,8 +40,8 @@ from tools.codegen.api.types import (
tensorOptionsT,
symIntArrayRefT,
)
from tools.codegen import local
from tools.codegen.utils import assert_never
from torchgen import local
from torchgen.utils import assert_never
from typing import Optional, Sequence, Union, List, Set
# This file describes the translation of JIT schema to the public C++

View File

@ -1,4 +1,4 @@
from tools.codegen.model import (
from torchgen.model import (
Argument,
FunctionSchema,
Return,
@ -7,9 +7,9 @@ from tools.codegen.model import (
Type,
)
from tools.codegen.api.types import ArgName, Binding, NamedCType, CType
from tools.codegen.api import cpp
from tools.codegen.utils import concatMap, assert_never
from torchgen.api.types import ArgName, Binding, NamedCType, CType
from torchgen.api import cpp
from torchgen.utils import concatMap, assert_never
import itertools
from typing import Sequence, List, Union

View File

@ -1,11 +1,11 @@
from tools.codegen.model import (
from torchgen.model import (
FunctionSchema,
BaseTy,
BaseType,
NativeFunctionsViewGroup,
Argument,
)
from tools.codegen.api.types import (
from torchgen.api.types import (
Binding,
NamedCType,
ConstRefCType,
@ -15,7 +15,7 @@ from tools.codegen.api.types import (
longT,
boolT,
)
from tools.codegen.api import dispatcher
from torchgen.api import dispatcher
from typing import List, Optional

View File

@ -1,5 +1,5 @@
from typing import List, Union, Tuple, Optional
from tools.codegen.model import (
from torchgen.model import (
Type,
BaseTy,
BaseType,
@ -11,7 +11,7 @@ from tools.codegen.model import (
TensorOptionsArguments,
Argument,
)
from tools.codegen.api.types import (
from torchgen.api.types import (
CType,
BaseCppType,
BaseCType,

View File

@ -1,4 +1,4 @@
from tools.codegen.model import NativeFunctionsGroup
from torchgen.model import NativeFunctionsGroup
# Follows dispatcher calling convention, but:
# - Mutable arguments not allowed. Meta functions are always

View File

@ -1,4 +1,4 @@
from tools.codegen.model import (
from torchgen.model import (
Argument,
FunctionSchema,
Return,
@ -7,7 +7,7 @@ from tools.codegen.model import (
Type,
)
from tools.codegen.api.types import (
from torchgen.api.types import (
ArgName,
BaseCType,
Binding,
@ -24,9 +24,9 @@ from tools.codegen.api.types import (
boolT,
scalarTypeT,
)
from tools.codegen.api import cpp
from tools.codegen import local
from tools.codegen.utils import assert_never
from torchgen.api import cpp
from torchgen import local
from torchgen.utils import assert_never
from typing import Union, Sequence, List, Optional

View File

@ -1,10 +1,10 @@
from dataclasses import dataclass
from typing import Optional, Union, Sequence, Set, List, Dict, Tuple
from tools.codegen.api.types import Binding, CppSignature, CppSignatureGroup
from tools.codegen.api import cpp
from tools.codegen.gen import pythonify_default
from tools.codegen.model import (
from torchgen.api.types import Binding, CppSignature, CppSignatureGroup
from torchgen.api import cpp
from torchgen.gen import pythonify_default
from torchgen.model import (
Argument,
BaseTy,
BaseType,

View File

@ -1,4 +1,4 @@
from tools.codegen.model import (
from torchgen.model import (
Argument,
BaseTy,
BaseType,
@ -10,7 +10,7 @@ from tools.codegen.model import (
Type,
)
from tools.codegen.api.types import (
from torchgen.api.types import (
ArgName,
BaseCType,
Binding,
@ -28,8 +28,8 @@ from tools.codegen.api.types import (
iTensorListRefT,
)
from tools.codegen.api import cpp
from tools.codegen.utils import assert_never
from torchgen.api import cpp
from torchgen.utils import assert_never
from typing import Union, List
@ -65,7 +65,7 @@ def argumenttype_type(t: Type, *, mutable: bool, binds: ArgName) -> NamedCType:
elif isinstance(t, ListType):
if t.elem == BaseType(BaseTy.Tensor):
return NamedCType(binds, BaseCType(iTensorListRefT))
# TODO: delete these special cases; see tools.codegen.api.cpp--these
# TODO: delete these special cases; see torchgen.api.cpp--these
# must be changed in tandem, but there are problems; see
# https://github.com/pytorch/pytorch/pull/51485
elif str(t.elem) == "int":

View File

@ -1,5 +1,5 @@
from typing import Dict, Sequence, List, NoReturn, Union
from tools.codegen.api.types import (
from torchgen.api.types import (
tensorListT,
BaseCType,
Binding,
@ -204,7 +204,7 @@ When I failed, the following bindings were available in the context:
{ctx_desc}
This probably means there is a missing rule in the rules of tools.codegen.api.translate.
This probably means there is a missing rule in the rules of torchgen.api.translate.
Check this module for more information.
"""
)

View File

@ -1,4 +1,4 @@
from tools.codegen.model import (
from torchgen.model import (
Argument,
FunctionSchema,
NativeFunction,
@ -327,7 +327,7 @@ class NamedCType:
# We don't distinguish between binding sites for different APIs;
# instead, all of the important distinctions are encoded in CType,
# which you can use to figure out if a given Binding is appropriate
# for use in another context. (See tools.codegen.api.translate)
# for use in another context. (See torchgen.api.translate)
@dataclass(frozen=True)
@ -746,7 +746,7 @@ def kernel_signature(
# Functions only, no types
from tools.codegen.api import (
from torchgen.api import (
cpp,
dispatcher,
native,

View File

@ -1,4 +1,4 @@
from tools.codegen.model import (
from torchgen.model import (
Argument,
BaseTy,
BaseType,
@ -8,8 +8,8 @@ from tools.codegen.model import (
DispatchKey,
)
import tools.codegen.api.types as api_types
from tools.codegen.api.types import (
import torchgen.api.types as api_types
from torchgen.api.types import (
ArgName,
BaseCType,
Binding,
@ -20,7 +20,7 @@ from tools.codegen.api.types import (
BaseCppType,
)
from tools.codegen.api import cpp, structured
from torchgen.api import cpp, structured
from dataclasses import dataclass
from typing import List, Optional

View File

@ -1,8 +1,8 @@
from typing import List, Tuple
from tools.codegen.api import cpp
from tools.codegen.api.types import Binding, CType, CppSignatureGroup
from tools.codegen.model import (
from torchgen.api import cpp
from torchgen.api.types import Binding, CType, CppSignatureGroup
from torchgen.model import (
Argument,
NativeFunction,
Type,

View File

@ -1,6 +1,6 @@
def define_targets(rules):
rules.py_library(
name = "codegen",
name = "torchgen",
srcs = rules.glob(["**/*.py"]),
deps = [
rules.requirement("PyYAML"),
@ -11,6 +11,6 @@ def define_targets(rules):
rules.py_binary(
name = "gen",
srcs = [":codegen"],
srcs = [":torchgen"],
visibility = ["//visibility:public"],
)

View File

@ -1,12 +1,12 @@
from tools.codegen.utils import S, T, context
from tools.codegen.model import (
from torchgen.utils import S, T, context
from torchgen.model import (
NativeFunction,
NativeFunctionsGroup,
NativeFunctionsViewGroup,
BackendIndex,
DispatchKey,
)
import tools.codegen.local as local
import torchgen.local as local
import functools
from typing import TypeVar, Union, Iterator, Callable, Dict, Optional

View File

@ -4,14 +4,14 @@ from pathlib import Path
from torch.jit._decompositions import decomposition_table
# from tools.codegen.code_template import CodeTemplate
# from torchgen.code_template import CodeTemplate
DECOMP_HEADER = r"""
/**
* @generated
* This is an auto-generated file. Please do not modify it by hand.
* To re-generate, please run:
* cd ~/pytorch && python tools/codegen/decompositions/gen_jit_decompositions.py
* cd ~/pytorch && python torchgen/decompositions/gen_jit_decompositions.py
*/
#include <torch/csrc/jit/jit_log.h>
#include <torch/csrc/jit/passes/inliner.h>

View File

@ -1,22 +1,22 @@
from abc import ABC
from typing import List, Union
from dataclasses import dataclass
from tools.codegen.context import method_with_native_function
from tools.codegen.model import BackendIndex, NativeFunction, NativeFunctionsGroup
from tools.codegen.api.types import (
from torchgen.context import method_with_native_function
from torchgen.model import BackendIndex, NativeFunction, NativeFunctionsGroup
from torchgen.api.types import (
BaseCType,
OptionalCType,
VectorCType,
kernel_signature,
)
import tools.codegen.api.dispatcher as dispatcher
from tools.codegen.api.lazy import (
import torchgen.api.dispatcher as dispatcher
from torchgen.api.lazy import (
LazyIrSchema,
LazyArgument,
isValueType,
tensorListValueT,
)
from tools.codegen.dest.lazy_ts_lowering import ts_lowering_body
from torchgen.dest.lazy_ts_lowering import ts_lowering_body
def node_ctor_arg_rvalue_string(arg: LazyArgument) -> str:

View File

@ -1,7 +1,7 @@
from typing import Union
from tools.codegen.model import NativeFunction, NativeFunctionsGroup
from tools.codegen.api.lazy import LazyIrSchema
from tools.codegen.api.types import OptionalCType
from torchgen.model import NativeFunction, NativeFunctionsGroup
from torchgen.api.lazy import LazyIrSchema
from torchgen.api.types import OptionalCType
def ts_lowering_body(f: Union[NativeFunctionsGroup, NativeFunction]) -> str:

View File

@ -1,11 +1,11 @@
from typing import List, Union, Optional
from tools.codegen.context import with_native_function_and_index
from tools.codegen.utils import mapMaybe
from tools.codegen.model import NativeFunction, NativeFunctionsGroup, BackendIndex
from tools.codegen.api.types import kernel_signature
import tools.codegen.api.meta as meta
import tools.codegen.api.structured as structured
from torchgen.context import with_native_function_and_index
from torchgen.utils import mapMaybe
from torchgen.model import NativeFunction, NativeFunctionsGroup, BackendIndex
from torchgen.api.types import kernel_signature
import torchgen.api.meta as meta
import torchgen.api.structured as structured
@with_native_function_and_index

View File

@ -4,9 +4,9 @@ from typing_extensions import Literal
from dataclasses import dataclass
import textwrap
from tools.codegen.context import method_with_native_function, native_function_manager
from tools.codegen.utils import Target, mapMaybe, assert_never
from tools.codegen.model import (
from torchgen.context import method_with_native_function, native_function_manager
from torchgen.utils import Target, mapMaybe, assert_never
from torchgen.model import (
DispatchKey,
NativeFunction,
NativeFunctionsGroup,
@ -18,7 +18,7 @@ from tools.codegen.model import (
BackendIndex,
gets_generated_out_inplace_wrapper,
)
from tools.codegen.api.types import (
from torchgen.api.types import (
BaseCType,
Binding,
ConstRefCType,
@ -32,11 +32,11 @@ from tools.codegen.api.types import (
NamedCType,
DispatcherSignature,
)
import tools.codegen.api.meta as meta
import tools.codegen.api.cpp as cpp
import tools.codegen.api.structured as structured
from tools.codegen.api.translate import translate
from tools.codegen.selective_build.selector import SelectiveBuilder
import torchgen.api.meta as meta
import torchgen.api.cpp as cpp
import torchgen.api.structured as structured
from torchgen.api.translate import translate
from torchgen.selective_build.selector import SelectiveBuilder
def gen_registration_headers(

View File

@ -1,7 +1,7 @@
from dataclasses import dataclass
from typing import Union, Optional, List, Tuple, Dict, Sequence
from tools.codegen.api.translate import translate
from tools.codegen.model import (
from torchgen.api.translate import translate
from torchgen.model import (
NativeFunctionsGroup,
ScalarType,
UfuncKey,
@ -10,9 +10,9 @@ from tools.codegen.model import (
BaseTy,
Argument,
)
import tools.codegen.api.ufunc as ufunc
from tools.codegen.api.ufunc import UfunctorBindings
from tools.codegen.api.types import (
import torchgen.api.ufunc as ufunc
from torchgen.api.ufunc import UfunctorBindings
from torchgen.api.types import (
StructuredImplSignature,
scalar_t,
opmath_t,
@ -24,7 +24,7 @@ from tools.codegen.api.types import (
ScalarTypeToCppMapping,
VectorizedCType,
)
from tools.codegen.context import with_native_function
from torchgen.context import with_native_function
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
#

View File

@ -8,7 +8,7 @@ import pathlib
import json
from dataclasses import dataclass
from tools.codegen.model import (
from torchgen.model import (
STRUCTURED_DISPATCH_KEYS,
Argument,
DispatchKey,
@ -33,7 +33,7 @@ from tools.codegen.model import (
BaseOperatorName,
Tag,
)
from tools.codegen.api.types import (
from torchgen.api.types import (
Binding,
CppSignatureGroup,
DispatcherSignature,
@ -41,15 +41,15 @@ from tools.codegen.api.types import (
NativeSignature,
SpecialArgName,
)
from tools.codegen.api import cpp
import tools.codegen.api.dispatcher as dispatcher
import tools.codegen.api.native as native
import tools.codegen.api.meta as meta
import tools.codegen.api.structured as structured
from tools.codegen.api.translate import translate
from tools.codegen.code_template import CodeTemplate
from tools.codegen.selective_build.selector import SelectiveBuilder
from tools.codegen.utils import (
from torchgen.api import cpp
import torchgen.api.dispatcher as dispatcher
import torchgen.api.native as native
import torchgen.api.meta as meta
import torchgen.api.structured as structured
from torchgen.api.translate import translate
from torchgen.code_template import CodeTemplate
from torchgen.selective_build.selector import SelectiveBuilder
from torchgen.utils import (
Target,
concatMap,
context,
@ -60,14 +60,14 @@ from tools.codegen.utils import (
assert_never,
make_file_manager,
)
from tools.codegen.context import (
from torchgen.context import (
method_with_native_function,
native_function_manager,
with_native_function_and_indices,
with_native_function,
)
import tools.codegen.dest as dest
from tools.codegen.gen_functionalization_type import (
import torchgen.dest as dest
from torchgen.gen_functionalization_type import (
gen_functionalization_definition,
gen_functionalization_registration,
gen_functionalization_view_inverse_declaration,
@ -2281,7 +2281,7 @@ def main() -> None:
#include <ATen/hip/HIPDevice.h>
#include <ATen/hip/HIPContext.h>"""
from tools.codegen.model import dispatch_keys
from torchgen.model import dispatch_keys
# Only a limited set of dispatch keys get CPUFunctions.h headers generated
# for them; this is the set

View File

@ -5,12 +5,12 @@ import yaml
import re
from collections import namedtuple, Counter, defaultdict
from typing import List, Dict, Union, Sequence, Optional
from tools.codegen.gen import (
from torchgen.gen import (
get_grouped_native_functions,
parse_native_yaml,
NamespaceHelper,
)
from tools.codegen.model import (
from torchgen.model import (
BackendIndex,
BackendMetadata,
DispatchKey,
@ -18,13 +18,13 @@ from tools.codegen.model import (
NativeFunctionsGroup,
OperatorName,
)
from tools.codegen.selective_build.selector import SelectiveBuilder
from tools.codegen.utils import Target, concatMap, context, YamlLoader, FileManager
from tools.codegen.context import native_function_manager
from tools.codegen.code_template import CodeTemplate
import tools.codegen.dest as dest
import tools.codegen.api.dispatcher as dispatcher
from tools.codegen.api.types import DispatcherSignature
from torchgen.selective_build.selector import SelectiveBuilder
from torchgen.utils import Target, concatMap, context, YamlLoader, FileManager
from torchgen.context import native_function_manager
from torchgen.code_template import CodeTemplate
import torchgen.dest as dest
import torchgen.api.dispatcher as dispatcher
from torchgen.api.types import DispatcherSignature
# Parses the external backend's yaml, and adds a new BackendIndex for the backend's dispatch key.
@ -473,8 +473,8 @@ def run(
source_yaml: str, output_dir: str, dry_run: bool, impl_path: Optional[str] = None
) -> None:
# Assumes that this file lives at PYTORCH_ROOT/tools/codegen/gen_backend_stubs.py
pytorch_root = pathlib.Path(__file__).parent.parent.parent.absolute()
# Assumes that this file lives at PYTORCH_ROOT/torchgen/gen_backend_stubs.py
pytorch_root = pathlib.Path(__file__).parent.parent.absolute()
template_dir = os.path.join(pytorch_root, "aten/src/ATen/templates")
def make_file_manager(install_dir: str) -> FileManager:

View File

@ -1,18 +1,18 @@
from tools.codegen.api import cpp
from tools.codegen.api.types import (
from torchgen.api import cpp
from torchgen.api.types import (
DispatcherSignature,
Binding,
FunctionalizationLambda,
ViewInverseSignature,
NativeSignature,
)
from tools.codegen.api.translate import translate
from tools.codegen.context import (
from torchgen.api.translate import translate
from torchgen.context import (
with_native_function,
with_native_function_and,
native_function_manager,
)
from tools.codegen.model import (
from torchgen.model import (
Argument,
NativeFunction,
SchemaKind,
@ -26,7 +26,7 @@ from tools.codegen.model import (
NativeFunctionsViewGroup,
ListType,
)
from tools.codegen.selective_build.selector import SelectiveBuilder
from torchgen.selective_build.selector import SelectiveBuilder
from typing import List, Optional, Union, Tuple

View File

@ -16,21 +16,21 @@ from typing import (
Tuple,
Type,
)
from tools.codegen.dest.lazy_ir import GenLazyIR, GenTSLazyIR
from tools.codegen.gen import (
from torchgen.dest.lazy_ir import GenLazyIR, GenTSLazyIR
from torchgen.gen import (
get_grouped_native_functions,
parse_native_yaml,
NamespaceHelper,
)
from tools.codegen.model import (
from torchgen.model import (
FunctionSchema,
NativeFunction,
NativeFunctionsGroup,
OperatorName,
)
from tools.codegen.selective_build.selector import SelectiveBuilder
from tools.codegen.utils import concatMap, YamlLoader, FileManager
import tools.codegen.dest as dest
from torchgen.selective_build.selector import SelectiveBuilder
from torchgen.utils import concatMap, YamlLoader, FileManager
import torchgen.dest as dest
from .gen_backend_stubs import (
parse_backend_yaml,
error_on_missing_kernels,
@ -225,7 +225,7 @@ def main() -> None:
)
options = parser.parse_args()
# Assumes that this file lives at PYTORCH_ROOT/tools/codegen/gen_backend_stubs.py
# Assumes that this file lives at PYTORCH_ROOT/torchgen/gen_backend_stubs.py
torch_root = pathlib.Path(__file__).parent.parent.parent.absolute()
aten_path = str(torch_root / "aten" / "src" / "ATen")
lazy_ir_generator: Type[GenLazyIR] = default_args.lazy_ir_generator

View File

@ -1,6 +1,6 @@
import re
from tools.codegen.utils import assert_never
from torchgen.utils import assert_never
from dataclasses import dataclass
from typing import List, Dict, Optional, Iterator, Tuple, Set, Sequence, Callable, Union
@ -258,7 +258,7 @@ DTYPE_CLASSES["FloatingAndComplex"] = (
# Represents the valid entries for ufunc_inner_loop in native_functions.yaml.
# NB: if you add a new UfuncKey, you will teach tools.codegen.dest.ufunc how
# NB: if you add a new UfuncKey, you will teach torchgen.dest.ufunc how
# to process it. Most logic will ignore keys they don't understand, so your
# new key will get silently ignored until you hook in logic to deal with it.
class UfuncKey(Enum):
@ -518,7 +518,7 @@ class NativeFunction:
assert tag_str is None or isinstance(tag_str, str), f"not a str: {tag_str}"
tag = Tag.parse(tag_str) if tag_str else None
from tools.codegen.api import cpp
from torchgen.api import cpp
raw_dispatch = e.pop("dispatch", None)
assert raw_dispatch is None or isinstance(raw_dispatch, dict), e
@ -2132,4 +2132,4 @@ class Precompute:
return replace_list
import tools.codegen.api.ufunc as ufunc
import torchgen.api.ufunc as ufunc

View File

@ -5,9 +5,9 @@ from pathlib import Path
from typing import Any, Dict, List
import torch
from tools.codegen.code_template import CodeTemplate
from torchgen.code_template import CodeTemplate
from torch.jit.generate_bytecode import generate_upgraders_bytecode
from tools.codegen.operator_versions.gen_mobile_upgraders_constant import (
from torchgen.operator_versions.gen_mobile_upgraders_constant import (
MOBILE_UPGRADERS_HEADER_DESCRIPTION,
)

View File

@ -2,6 +2,6 @@ MOBILE_UPGRADERS_HEADER_DESCRIPTION = """/**
* @generated
* This is an auto-generated file. Please do not modify it by hand.
* To re-generate, please run:
* cd ~/pytorch && python tools/codegen/operator_versions/gen_mobile_upgraders.py
* cd ~/pytorch && python torchgen/operator_versions/gen_mobile_upgraders.py
*/
"""

View File

@ -3,8 +3,8 @@ import yaml
from dataclasses import dataclass
from tools.codegen.model import NativeFunction
from tools.codegen.selective_build.operator import (
from torchgen.model import NativeFunction
from torchgen.selective_build.operator import (
SelectiveBuildOperator,
merge_debug_info,
merge_operator_dicts,

View File

@ -10,7 +10,7 @@ SHAPE_HEADER = r"""
* This is an auto-generated file. Please do not modify it by hand.
* To re-generate, please run:
* cd ~/pytorch && python
* tools/codegen/shape_functions/gen_jit_shape_functions.py
* torchgen/shape_functions/gen_jit_shape_functions.py
*/
#include <torch/csrc/jit/jit_log.h>
#include <torch/csrc/jit/passes/inliner.h>

View File

@ -1,4 +1,4 @@
from tools.codegen.model import NativeFunctionsGroup
from torchgen.model import NativeFunctionsGroup
from typing import Dict

View File

@ -1,7 +1,7 @@
from tools.codegen import gen
from tools.codegen.context import native_function_manager
from tools.codegen.model import NativeFunctionsGroup
from tools.codegen.static_runtime import gen_structured
from torchgen import gen
from torchgen.context import native_function_manager
from torchgen.model import NativeFunctionsGroup
from torchgen.static_runtime import gen_structured
import argparse
import itertools

View File

@ -1,6 +1,6 @@
import tools.codegen.api.cpp as cpp
from tools.codegen.context import native_function_manager
from tools.codegen.model import (
import torchgen.api.cpp as cpp
from torchgen.context import native_function_manager
from torchgen.model import (
Argument,
BaseTy,
FunctionSchema,
@ -11,7 +11,7 @@ from tools.codegen.model import (
TensorOptionsArguments,
Type,
)
from tools.codegen.static_runtime import config
from torchgen.static_runtime import config
import math
from typing import List, Optional, Sequence, Tuple, Union

View File

@ -22,7 +22,7 @@ from typing import (
)
from enum import Enum
from tools.codegen.code_template import CodeTemplate
from torchgen.code_template import CodeTemplate
# Safely load fast C Yaml loader/dumper if they are available
try:
@ -185,7 +185,7 @@ class FileManager:
if isinstance(env, dict):
# TODO: Update the comment reference to the correct location
if "generated_comment" not in env:
comment = "@" + "generated by tools/codegen/gen.py"
comment = "@" + "generated by torchgen/gen.py"
comment += " from {}".format(os.path.basename(template_fn))
env["generated_comment"] = comment
template = _read_template(os.path.join(self.template_dir, template_fn))