pytorch/tools/test/test_codegen.py
Edward Yang 36420b5e8c Rename tools/codegen to torchgen (#76275)
Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/76275

In preparation for addressing
https://github.com/pytorch/pytorch/issues/73212

Diff was generated with:

```
git mv tools/codegen torchgen
git grep -l 'tools.codegen' | xargs sed -i 's/tools.codegen/torchgen/g'
sed -i "s/\${TOOLS_PATH}\/codegen/\${TORCH_ROOT}\/torchgen/g" caffe2/CMakeLists.txt
```

and manual edits to:

* tools/test/test_gen_backend_stubs.py
* torchgen/build.bzl
* torchgen/gen_backend_stubs.py

aka this diff:

```
diff --git a/tools/test/test_gen_backend_stubs.py b/tools/test/test_gen_backend_stubs.py
index 3dc26c6d2d..104054575e 100644
--- a/tools/test/test_gen_backend_stubs.py
+++ b/tools/test/test_gen_backend_stubs.py
@@ -9,7 +9,7 @@ from torchgen.gen_backend_stubs import run
 from torchgen.gen import _GLOBAL_PARSE_NATIVE_YAML_CACHE  # noqa: F401

 path = os.path.dirname(os.path.realpath(__file__))
-gen_backend_stubs_path = os.path.join(path, '../torchgen/gen_backend_stubs.py')
+gen_backend_stubs_path = os.path.join(path, '../../torchgen/gen_backend_stubs.py')

 # gen_backend_stubs.py is an integration point that is called directly by external backends.
 # The tests here are to confirm that badly formed inputs result in reasonable error messages.
diff --git a/torchgen/build.bzl b/torchgen/build.bzl
index ed04e35a43..d00078a3cf 100644
--- a/torchgen/build.bzl
+++ b/torchgen/build.bzl
@@ -1,6 +1,6 @@
 def define_targets(rules):
     rules.py_library(
-        name = "codegen",
+        name = "torchgen",
         srcs = rules.glob(["**/*.py"]),
         deps = [
             rules.requirement("PyYAML"),
@@ -11,6 +11,6 @@ def define_targets(rules):

     rules.py_binary(
         name = "gen",
-        srcs = [":codegen"],
+        srcs = [":torchgen"],
         visibility = ["//visibility:public"],
     )
diff --git a/torchgen/gen_backend_stubs.py b/torchgen/gen_backend_stubs.py
index c1a672a655..beee7a15e0 100644
--- a/torchgen/gen_backend_stubs.py
+++ b/torchgen/gen_backend_stubs.py
@@ -474,7 +474,7 @@ def run(
 ) -> None:

     # Assumes that this file lives at PYTORCH_ROOT/torchgen/gen_backend_stubs.py
-    pytorch_root = pathlib.Path(__file__).parent.parent.parent.absolute()
+    pytorch_root = pathlib.Path(__file__).parent.parent.absolute()
     template_dir = os.path.join(pytorch_root, "aten/src/ATen/templates")

     def make_file_manager(install_dir: str) -> FileManager:
```
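The `gen_backend_stubs.py` hunk exists because the file moved up one directory level (`tools/codegen/` to `torchgen/`), so reaching the repo root now takes one fewer `.parent` hop. Schematically (a sketch of the changed line, with comments added here for illustration):

```
import pathlib

# Old location: PYTORCH_ROOT/tools/codegen/gen_backend_stubs.py
#   -> three .parent hops to reach PYTORCH_ROOT.
# New location: PYTORCH_ROOT/torchgen/gen_backend_stubs.py
#   -> two .parent hops suffice.
pytorch_root = pathlib.Path(__file__).parent.parent.absolute()
```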

run_all_fbandroid_tests

Test Plan: sandcastle

Reviewed By: albanD, ngimel

Differential Revision: D35770317

fbshipit-source-id: 153ac4a7fef15b1e750812a90bfafdbc8f1ebcdf
(cherry picked from commit c6d485d1d4648fa1c8a4c14c5bf3d8e899b9b4dd)
2022-04-25 01:38:06 +00:00
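As the `sed` invocation above suggests, the only visible effect of the rename on downstream code is the import path. A before/after sketch, using `torchgen.model` (which the test file below also imports; other `torchgen` submodules follow the same pattern):

```
# Before #76275:
# from tools.codegen.model import FunctionSchema

# After #76275:
from torchgen.model import FunctionSchema

schema = FunctionSchema.parse("func(Tensor a) -> Tensor")
```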


import dataclasses
import typing
import unittest

from tools.autograd import gen_autograd_functions
from tools.autograd import load_derivatives

import torchgen.model


class TestCreateDerivative(unittest.TestCase):
    def test_named_grads(self) -> None:
        schema = torchgen.model.FunctionSchema.parse(
            "func(Tensor a, Tensor b) -> (Tensor x, Tensor y)"
        )
        native_function = dataclasses.replace(DEFAULT_NATIVE_FUNCTION, func=schema)

        derivative = load_derivatives.create_derivative(
            native_function,
            formula="func_backward(grad_x, grad_y)",
            var_names=(),
            available_named_gradients=["grad_x", "grad_y"],
        )
        self.assertSetEqual(derivative.named_gradients, {"grad_x", "grad_y"})
    def test_non_differentiable_output(self) -> None:
        specification = "func(Tensor a, Tensor b) -> (Tensor x, bool y, Tensor z)"
        schema = torchgen.model.FunctionSchema.parse(specification)
        native_function = dataclasses.replace(DEFAULT_NATIVE_FUNCTION, func=schema)

        differentiability_info = load_derivatives.create_differentiability_info(
            defn={
                "name": specification,
                "a": "grads[0]",
                "b": "grads[2]",
            },
            functions_by_signature={schema.signature(): [native_function]},
            functions_by_schema={specification: native_function},
            op_counter=typing.Counter[str](),
        )

        self.assertSequenceEqual(
            differentiability_info.available_named_gradients,
            # grad_y is not present because y is a
            # bool and thus not differentiable.
            ["grad_x", "grad_z"],
        )
    def test_indexed_grads(self) -> None:
        schema = torchgen.model.FunctionSchema.parse(
            "func(Tensor a, Tensor b) -> (Tensor x, Tensor y)"
        )
        native_function = dataclasses.replace(DEFAULT_NATIVE_FUNCTION, func=schema)

        derivative = load_derivatives.create_derivative(
            native_function,
            formula="func_backward(grads[0], grads[1])",
            var_names=(),
            available_named_gradients=["grad_x", "grad_y"],
        )
        self.assertSetEqual(derivative.named_gradients, set())
    def test_named_grads_and_indexed_grads(self) -> None:
        specification = "func(Tensor a, Tensor b) -> (Tensor x, Tensor y)"
        schema = torchgen.model.FunctionSchema.parse(specification)
        native_function = dataclasses.replace(DEFAULT_NATIVE_FUNCTION, func=schema)

        with self.assertRaisesRegex(
            RuntimeError, 'illegally mixes use of "grad_RETURN_NAME"'
        ):
            load_derivatives.create_differentiability_info(
                defn={
                    "name": specification,
                    # Uh-oh, the derivatives reference gradients by
                    # name and by index.
                    "a": "grad_x",
                    "b": "grads[1]",
                },
                functions_by_signature={schema.signature(): [native_function]},
                functions_by_schema={specification: native_function},
                op_counter=typing.Counter[str](),
            )

class TestGenAutogradFunctions(unittest.TestCase):
    def test_non_differentiable_output_invalid_type(self) -> None:
        specification = "func(Tensor a, Tensor b) -> (Tensor x, bool y, Tensor z)"
        schema = torchgen.model.FunctionSchema.parse(specification)
        native_function = dataclasses.replace(DEFAULT_NATIVE_FUNCTION, func=schema)

        differentiability_info = load_derivatives.create_differentiability_info(
            defn={
                "name": specification,
                "a": "grad_x",
                "b": "grad_z",
            },
            functions_by_signature={schema.signature(): [native_function]},
            functions_by_schema={specification: native_function},
            op_counter=typing.Counter[str](),
        )
        definition = gen_autograd_functions.process_function(
            differentiability_info, gen_autograd_functions.FUNCTION_DEFINITION
        )
        # grad_z should map to grads[1], not grads[2] because output 1
        # (y) is not differentiable.
        assert "grad_z = grads[2]" not in definition
        assert "grad_z = grads[1]" in definition
    def test_non_differentiable_output_output_differentiability(self) -> None:
        specification = "func(Tensor a, Tensor b) -> (Tensor x, Tensor y, Tensor z)"
        schema = torchgen.model.FunctionSchema.parse(specification)
        native_function = dataclasses.replace(DEFAULT_NATIVE_FUNCTION, func=schema)

        differentiability_info = load_derivatives.create_differentiability_info(
            defn={
                "name": specification,
                "a": "grad_x",
                "b": "grad_z",
                "output_differentiability": [True, False, True],
            },
            functions_by_signature={schema.signature(): [native_function]},
            functions_by_schema={specification: native_function},
            op_counter=typing.Counter[str](),
        )
        definition = gen_autograd_functions.process_function(
            differentiability_info, gen_autograd_functions.FUNCTION_DEFINITION
        )
        # grad_z should map to grads[1], not grads[2], because output 1
        # (y) is marked non-differentiable via output_differentiability.
        assert "grad_z = grads[2]" not in definition
        assert "grad_z = grads[1]" in definition

# Represents the most basic NativeFunction. Use dataclasses.replace()
# to edit for use.
DEFAULT_NATIVE_FUNCTION, _ = torchgen.model.NativeFunction.from_yaml(
    {"func": "func() -> bool"}, loc=torchgen.model.Location(__file__, 1)
)


if __name__ == "__main__":
    unittest.main()
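Because the file ends in `unittest.main()`, it can be run directly from a PyTorch checkout. A minimal sketch of running just one test class programmatically instead, assuming `tools.test.test_codegen` is importable from the repo root (adjust the dotted path to your layout):

```
import unittest

# Load and run only TestCreateDerivative from this file.
suite = unittest.defaultTestLoader.loadTestsFromName(
    "tools.test.test_codegen.TestCreateDerivative"
)
unittest.TextTestRunner(verbosity=2).run(suite)
```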