Mirror of https://github.com/pytorch/pytorch.git
Summary: Pull Request resolved: https://github.com/pytorch/pytorch/pull/41575
Fixes https://github.com/pytorch/pytorch/issues/34294

This updates the C++ argument parser to correctly handle `TensorList` operands. I've also included a number of updates to the testing infrastructure; this is because we're now doing a much more careful job of testing the signatures of aten kernels, using the type information about the arguments as read in from `Declarations.yaml`.

The changes to the tests are required because we're now only checking for `__torch_function__` attributes on `Tensor`, `Optional[Tensor]`, and elements of `TensorList` operands, whereas before we were checking for `__torch_function__` on all operands. The relatively simplistic approach the tests were using before -- assuming all positional arguments might be tensors -- therefore doesn't work anymore. I now think that checking for `__torch_function__` on all operands was a mistake in the original design. The updates to the signatures of the `lambda` functions are to handle this new, more stringent checking of signatures.

I also added override support for `torch.nn.functional.threshold` and `torch.nn.functional.layer_norm`, which did not yet have python-level support.

Benchmarks are still WIP.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/34725
Reviewed By: mruberry
Differential Revision: D22357738
Pulled By: ezyang
fbshipit-source-id: 0e7f4a58517867b2e3f193a0a8390e2ed294e1f3
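To illustrate the dispatch behavior this enables, here is a minimal sketch. The Wrapper class is hypothetical and not part of this patch, and the classmethod form of the protocol shown here is the one current releases expect (the exact signature differed slightly at the time of this PR):

    import torch
    import torch.nn.functional as F

    class Wrapper:
        # Hypothetical type implementing the __torch_function__ protocol.
        @classmethod
        def __torch_function__(cls, func, types, args=(), kwargs=None):
            return 'intercepted ' + func.__name__

    # Elements of TensorList operands are now checked for __torch_function__,
    # so ops taking lists of tensors, such as torch.cat, can be overridden:
    print(torch.cat([Wrapper(), Wrapper()]))   # intercepted cat

    # layer_norm (and threshold) gained python-level override support:
    print(F.layer_norm(Wrapper(), (3,)))       # intercepted layer_norm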
130 lines
4.6 KiB
Python
import argparse
import os
import sys

source_files = {'.py', '.cpp', '.h'}

DECLARATIONS_PATH = 'torch/share/ATen/Declarations.yaml'


# TODO: This is a little inaccurate, because it will also pick
# up setup_helper scripts which don't affect code generation
def all_generator_source():
    r = []
    for directory, _, filenames in os.walk('tools'):
        for f in filenames:
            if os.path.splitext(f)[1] in source_files:
                full = os.path.join(directory, f)
                r.append(full)
    return sorted(r)

def generate_code(ninja_global=None,
                  declarations_path=None,
                  nn_path=None,
                  install_dir=None,
                  subset=None,
                  disable_autograd=False,
                  selected_op_list_path=None,
                  selected_op_list=None,
                  force_schema_registration=False):
    # cwrap depends on pyyaml, so we can't import it earlier
    root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
    sys.path.insert(0, root)
    from tools.autograd.gen_autograd import gen_autograd, gen_autograd_python
    from tools.autograd.gen_annotated_fn_args import gen_annotated
    from tools.jit.gen_unboxing_wrappers import gen_unboxing_wrappers

    # Build ATen based Variable classes
    if install_dir is None:
        install_dir = 'torch/csrc'
        python_install_dir = 'torch/testing/_internal/generated'
    else:
        python_install_dir = install_dir
    autograd_gen_dir = os.path.join(install_dir, 'autograd', 'generated')
    jit_gen_dir = os.path.join(install_dir, 'jit', 'generated')
    for d in (autograd_gen_dir, jit_gen_dir, python_install_dir):
        if not os.path.exists(d):
            os.makedirs(d)
    # RUNFILES_DIR is set when running under Bazel; resolve source paths
    # relative to it in that case.
    runfiles_dir = os.environ.get("RUNFILES_DIR", None)
    data_dir = os.path.join(runfiles_dir, 'pytorch') if runfiles_dir else ''
    autograd_dir = os.path.join(data_dir, 'tools', 'autograd')
    tools_jit_templates = os.path.join(data_dir, 'tools', 'jit', 'templates')

if subset == "pybindings" or not subset:
|
|
gen_autograd_python(declarations_path or DECLARATIONS_PATH, autograd_gen_dir, autograd_dir)
|
|
|
|
if subset == "libtorch" or not subset:
|
|
gen_autograd(
|
|
declarations_path or DECLARATIONS_PATH,
|
|
autograd_gen_dir,
|
|
autograd_dir,
|
|
disable_autograd=disable_autograd,
|
|
selected_op_list=selected_op_list,
|
|
)
|
|
gen_unboxing_wrappers(
|
|
declarations_path or DECLARATIONS_PATH,
|
|
jit_gen_dir,
|
|
tools_jit_templates,
|
|
disable_autograd=disable_autograd,
|
|
selected_op_list_path=selected_op_list_path,
|
|
selected_op_list=selected_op_list,
|
|
force_schema_registration=force_schema_registration)
|
|
|
|
if subset == "python" or not subset:
|
|
gen_annotated(
|
|
declarations_path or DECLARATIONS_PATH,
|
|
python_install_dir,
|
|
autograd_dir)
|
|
|
|
|
|
def main():
    parser = argparse.ArgumentParser(description='Autogenerate code')
    parser.add_argument('--declarations-path')
    parser.add_argument('--nn-path')
    parser.add_argument('--ninja-global')
    parser.add_argument('--install_dir')
    parser.add_argument(
        '--subset',
        help='Subset of source files to generate. Can be "libtorch", "pybindings" or "python". Generates all of them when omitted.'
    )
    parser.add_argument(
        '--disable-autograd',
        default=False,
        action='store_true',
        help='Skip generating autograd-related code when this flag is set',
    )
    parser.add_argument(
        '--selected-op-list-path',
        help='Path to the YAML file that contains the list of operators to include for custom build.',
    )
    parser.add_argument(
        '--selected-op-list',
        nargs="*",
        type=str,
        help="""List of operator names to include for custom build, in addition to those in selected-op-list-path.
For example, --selected-op-list aten::add.Tensor aten::_convolution.""",
    )
    parser.add_argument(
        '--force_schema_registration',
        action='store_true',
        help='Force generating schema-only registrations for ops that are not '
             'listed on --selected-op-list',
    )
    options = parser.parse_args()
    generate_code(
        options.ninja_global,
        options.declarations_path,
        options.nn_path,
        options.install_dir,
        options.subset,
        options.disable_autograd,
        options.selected_op_list_path,
        options.selected_op_list,
        options.force_schema_registration,
    )


if __name__ == "__main__":
    main()
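For reference, a minimal sketch of driving the generator programmatically; the module path follows from the root computation in generate_code() (which implies the script resides at tools/setup_helpers/generate_code.py), and the argument values are illustrative rather than project defaults:

    from tools.setup_helpers.generate_code import generate_code

    generate_code(
        declarations_path='torch/share/ATen/Declarations.yaml',  # falls back to DECLARATIONS_PATH when None
        install_dir='build/generated',  # Python output lands in the same directory when install_dir is given
        subset='pybindings',            # regenerate only the Python binding sources
    )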