mirror of
				https://github.com/pytorch/pytorch.git
				synced 2025-10-31 12:15:03 +08:00 
			
		
		
		
	This PR introduces a way to compile a region of FX graph using `fx.traceback.annotate`.
### UX
1) In the user code, mark the region that you want to be compiled with inductor using `with fx_traceback.annotate({"compile_with_inductor": 0})`. As of now, we just rely on the string `compile_with_inductor` and ignore the integer. As the needs arise, we can update the logic.
Example
```
        def fn(x, y):
            sin = torch.sin(x)
            with fx_traceback.annotate({"compile_with_inductor": 0}):
                mul = sin * y
                add = mul + 1
            return torch.sin(add)
```
2) You have to instruct the compiler to use the annotations with `compile_fx_annotated_nodes_with_inductor` transformation. This is somewhat controversial, and a user might expect that just setting annotation is enough. But for now to control the blast radius, we need to explicitly do this. One such example is
```
# Set the fw and bw compiler of aot_autograd to `compile_fx_annotated_nodes_with_inductor`
def aot_eager_regional_inductor():
    return aot_autograd(
        fw_compiler=compile_fx_annotated_nodes_with_inductor,
        bw_compiler=compile_fx_annotated_nodes_with_inductor,
    )
```
3) Fixable in short-term - You have to wrap the user code in `torch.fx.traceback.preserve_node_meta` to ensure that annotations are propagated to the compiler. This is fixable, just need to make CI happy.
### Implementation
1) Relies on `CapabilityBasedPartitioner` to "scoop" out regions based on annotations, and then create subgraphs in the main graph.
2) Call `torch._inductor.standalone_compile` on these subgraphs, and jam the returned callable into the FX graph at the place of call_module
Resulting graph looks something like this - search for `torch__inductor_standalone_compile_inner`
Forward graph
```
class GraphModule(torch.nn.Module):
    def forward(self, primals_1: "f32[10]", primals_2: "f32[10]"):
         # File: /data/users/anijain/pytorch2/test/dynamo/test_regional_inductor.py:64 in fn, code: sin = torch.sin(x)
        sin: "f32[10]" = torch.ops.aten.sin.default(primals_1)
        # No stacktrace found for following nodes
        inner = torch__inductor_standalone_compile_inner(sin, primals_2)
         # File: /data/users/anijain/pytorch2/test/dynamo/test_regional_inductor.py:68 in fn, code: add = mul + 1
        getitem: "f32[10]" = inner[0];  inner = None
         # File: /data/users/anijain/pytorch2/test/dynamo/test_regional_inductor.py:70 in fn, code: return torch.sin(add)
        sin_1: "f32[10]" = torch.ops.aten.sin.default(getitem)
        return (sin_1, primals_1, primals_2, sin, getitem)
```
Backward graph
```
class GraphModule(torch.nn.Module):
    def forward(self, primals_1: "f32[10]", primals_2: "f32[10]", sin: "f32[10]", add: "f32[10]", tangents_1: "f32[10]"):
         # File: /data/users/anijain/pytorch2/test/dynamo/test_regional_inductor.py:64 in fn, code: sin = torch.sin(x)
        cos_1: "f32[10]" = torch.ops.aten.cos.default(primals_1);  primals_1 = None
         # File: /data/users/anijain/pytorch2/test/dynamo/test_regional_inductor.py:70 in fn, code: return torch.sin(add)
        cos: "f32[10]" = torch.ops.aten.cos.default(add);  add = None
        mul_1: "f32[10]" = torch.ops.aten.mul.Tensor(tangents_1, cos);  tangents_1 = cos = None
        # No stacktrace found for following nodes
        inner = torch__inductor_standalone_compile_inner(mul_1, sin, primals_2);  mul_1 = sin = primals_2 = None
         # File: /data/users/anijain/pytorch2/test/dynamo/test_regional_inductor.py:67 in fn, code: mul = sin * y
        getitem: "f32[10]" = inner[0]
        getitem_1: "f32[10]" = inner[1];  inner = None
         # File: /data/users/anijain/pytorch2/test/dynamo/test_regional_inductor.py:64 in fn, code: sin = torch.sin(x)
        mul_4: "f32[10]" = torch.ops.aten.mul.Tensor(getitem_1, cos_1);  getitem_1 = cos_1 = None
        return (mul_4, getitem)
```
### Some issue raised in the HOP meeting
1) CSE will not differentiate different meta custom nodes and do wrong thing.
2) SAC - The recomputed forward will be smaller than the forward. Will we compile a smaller region than?
3) What happens if you have a op in the middle which does not disturb the topology, is it still 1 subgraph?
4) What happens with the nesting of `fx_traceback.annotate`? Are there any ordering requirements?
5) What are we going to use the annotations for?
   a) compile flex
   b) streams
   c) nn.Module info to organize MoE components for pipelining
   d) PP stages
   e) Rename graph nodes for more debugging
   f) No nested regional compile
Pull Request resolved: https://github.com/pytorch/pytorch/pull/164776
Approved by: https://github.com/SherlockNoMad
ghstack dependencies: #165188
		
	
		
			
				
	
	
		
			3551 lines
		
	
	
		
			96 KiB
		
	
	
	
		
			Python
		
	
	
	
	
	
			
		
		
	
	
			3551 lines
		
	
	
		
			96 KiB
		
	
	
	
		
			Python
		
	
	
	
	
	
| #
 | |
| # PyTorch documentation build configuration file, created by
 | |
| # sphinx-quickstart on Fri Dec 23 13:31:47 2016.
 | |
| #
 | |
| # This file is execfile()d with the current directory set to its
 | |
| # containing dir.
 | |
| #
 | |
| # Note that not all possible configuration values are present in this
 | |
| # autogenerated file.
 | |
| #
 | |
| # All configuration values have a default; values that are commented out
 | |
| # serve to show the default.
 | |
| 
 | |
| import inspect
 | |
| import os
 | |
| 
 | |
| # import sys
 | |
| import pkgutil
 | |
| import re
 | |
| from os import path
 | |
| 
 | |
| # source code directory, relative to this file, for sphinx-autobuild
 | |
| # sys.path.insert(0, os.path.abspath('../..'))
 | |
| import torch
 | |
| 
 | |
| 
 | |
| # If extensions (or modules to document with autodoc) are in another directory,
 | |
| # add these directories to sys.path here. If the directory is relative to the
 | |
| # documentation root, use os.path.abspath to make it absolute, like shown here.
 | |
| 
 | |
| 
 | |
| try:
 | |
|     import torchvision  # noqa: F401
 | |
| except ImportError:
 | |
|     import warnings
 | |
| 
 | |
|     warnings.warn('unable to load "torchvision" package')
 | |
| 
 | |
| RELEASE = os.environ.get("RELEASE", False)
 | |
| 
 | |
| import pytorch_sphinx_theme2
 | |
| 
 | |
| 
 | |
| html_theme = "pytorch_sphinx_theme2"
 | |
| html_theme_path = [pytorch_sphinx_theme2.get_html_theme_path()]
 | |
| 
 | |
| 
 | |
| # -- General configuration ------------------------------------------------
 | |
| 
 | |
| # Add any Sphinx extension module names here, as strings. They can be
 | |
| # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
 | |
| # ones.
 | |
| extensions = [
 | |
|     "sphinx.ext.autodoc",
 | |
|     "sphinx.ext.autosummary",
 | |
|     "sphinx.ext.doctest",
 | |
|     "sphinx.ext.intersphinx",
 | |
|     "sphinx.ext.todo",
 | |
|     "sphinx.ext.coverage",
 | |
|     "sphinx.ext.napoleon",
 | |
|     "sphinx.ext.autosectionlabel",
 | |
|     "sphinxcontrib.katex",
 | |
|     "sphinx_copybutton",
 | |
|     "sphinx_design",
 | |
|     "myst_nb",
 | |
|     "sphinx.ext.linkcode",
 | |
|     "sphinxcontrib.mermaid",
 | |
|     "sphinx_sitemap",
 | |
| ]
 | |
| 
 | |
| myst_enable_extensions = [
 | |
|     "colon_fence",
 | |
|     "deflist",
 | |
|     "html_image",
 | |
| ]
 | |
| 
 | |
| html_baseurl = "https://docs.pytorch.org/docs/stable/"  # needed for sphinx-sitemap
 | |
| sitemap_locales = [None]
 | |
| sitemap_excludes = [
 | |
|     "search.html",
 | |
|     "genindex.html",
 | |
| ]
 | |
| sitemap_url_scheme = "{link}"
 | |
| 
 | |
| html_additional_pages = {
 | |
|     "404": "404.html",
 | |
| }
 | |
| 
 | |
| # build the templated autosummary files
 | |
| autosummary_generate = True
 | |
| numpydoc_show_class_members = False
 | |
| 
 | |
| # autosectionlabel throws warnings if section names are duplicated.
 | |
| # The following tells autosectionlabel to not throw a warning for
 | |
| # duplicated section names that are in different documents.
 | |
| autosectionlabel_prefix_document = True
 | |
| 
 | |
| # katex options
 | |
| #
 | |
| #
 | |
| 
 | |
| katex_prerender = True
 | |
| 
 | |
| # General information about the project.
 | |
| project = "PyTorch"
 | |
| copyright = "PyTorch Contributors"
 | |
| author = "PyTorch Contributors"
 | |
| torch_version = str(torch.__version__)
 | |
| 
 | |
| # The version info for the project you're documenting, acts as replacement for
 | |
| # |version| and |release|, also used in various other places throughout the
 | |
| # built documents.
 | |
| #
 | |
| # The short X.Y version.
 | |
| # TODO: change to [:2] at v1.0
 | |
| version = "main (" + torch_version + " )"
 | |
| # The full version, including alpha/beta/rc tags.
 | |
| release = "main"
 | |
| 
 | |
| # Customized html_title here.
 | |
| # Default is " ".join(project, release, "documentation") if not set
 | |
| if RELEASE:
 | |
|     # Turn 1.11.0aHASH into 1.11
 | |
|     # Note: the release candidates should no longer have the aHASH suffix, but in any
 | |
|     # case we wish to leave only major.minor, even for rc builds.
 | |
|     version = ".".join(torch_version.split(".")[:2])
 | |
|     html_title = " ".join((project, version, "documentation"))
 | |
|     release = version
 | |
| 
 | |
| switcher_version = "main" if not RELEASE else version
 | |
| 
 | |
| html_static_path = ["_static"]
 | |
| html_theme_options = {
 | |
|     "logo": {"text": "Home"},
 | |
|     "analytics_id": "GTM-T8XT4PS",
 | |
|     "canonical_url": "https://docs.pytorch.org/docs/stable/",
 | |
|     "switcher": {
 | |
|         "json_url": "https://docs.pytorch.org/docs/pytorch-versions.json",
 | |
|         "version_match": switcher_version,
 | |
|     },
 | |
|     "show_toc_level": 2,
 | |
|     "navigation_with_keys": False,
 | |
|     "external_links": [
 | |
|         {
 | |
|             "name": "Tutorials",
 | |
|             "url": "https://docs.pytorch.org/tutorials/",
 | |
|         },
 | |
|     ],
 | |
|     "show_version_warning_banner": True,
 | |
|     "icon_links": [
 | |
|         {
 | |
|             "name": "X",
 | |
|             "url": "https://x.com/PyTorch",
 | |
|             "icon": "fa-brands fa-x-twitter",
 | |
|         },
 | |
|         {
 | |
|             "name": "GitHub",
 | |
|             "url": "https://github.com/pytorch/pytorch",
 | |
|             "icon": "fa-brands fa-github",
 | |
|         },
 | |
|         {
 | |
|             "name": "PyTorch Forum",
 | |
|             "url": "https://discuss.pytorch.org/",
 | |
|             "icon": "fa-brands fa-discourse",
 | |
|         },
 | |
|         {
 | |
|             "name": "PyPi",
 | |
|             "url": "https://pypi.org/project/torch/",
 | |
|             "icon": "fa-brands fa-python",
 | |
|         },
 | |
|     ],
 | |
|     "navbar_align": "left",
 | |
|     "navbar_start": ["version-switcher", "navbar-logo"],
 | |
|     "navbar_center": ["navbar-nav"],
 | |
|     "navbar_end": ["search-field-custom", "theme-switcher", "navbar-icon-links"],
 | |
|     "header_links_before_dropdown": 6,
 | |
|     "navbar_persistent": [],
 | |
|     "use_edit_page_button": True,
 | |
|     "pytorch_project": "docs",
 | |
| }
 | |
| 
 | |
| theme_variables = pytorch_sphinx_theme2.get_theme_variables()
 | |
| html_context = {
 | |
|     "github_url": "https://github.com",
 | |
|     "github_user": "pytorch",
 | |
|     "github_repo": "pytorch",
 | |
|     "feedback_url": "https://github.com/pytorch/pytorch",
 | |
|     "github_version": "main",
 | |
|     "pytorch_project": "docs",
 | |
|     "doc_path": "docs/source",
 | |
|     "theme_variables": theme_variables,
 | |
|     # library links are defined in
 | |
|     # pytorch_sphinx_theme2/pytorch_sphinx_theme2/links.json
 | |
|     "library_links": theme_variables.get("library_links", []),
 | |
|     "version": version,
 | |
|     "date_info": {
 | |
|         "paths_to_skip": ["generated/", "index"],
 | |
|     },
 | |
| }
 | |
| 
 | |
| napoleon_use_ivar = True
 | |
| 
 | |
| # Add any paths that contain templates here, relative to this directory.
 | |
| templates_path = [
 | |
|     "_templates",
 | |
|     os.path.join(os.path.dirname(pytorch_sphinx_theme2.__file__), "templates"),
 | |
| ]
 | |
| # TODO: document these and remove them from here.
 | |
| 
 | |
| coverage_ignore_functions = [
 | |
|     # torch
 | |
|     "typename",
 | |
|     # torch.cuda._sanitizer
 | |
|     "zip_arguments",
 | |
|     "zip_by_key",
 | |
|     # torch.distributed.autograd
 | |
|     "is_available",
 | |
|     # torch.distributed.checkpoint.state_dict
 | |
|     "gc_context",
 | |
|     # torch.distributed.elastic.events
 | |
|     "record_rdzv_event",
 | |
|     # torch.distributed.elastic.metrics
 | |
|     "initialize_metrics",
 | |
|     # torch.distributed.elastic.rendezvous.registry
 | |
|     "get_rendezvous_handler",
 | |
|     # torch.distributed.launch
 | |
|     "launch",
 | |
|     "main",
 | |
|     "parse_args",
 | |
|     # torch.distributed.rpc
 | |
|     "is_available",
 | |
|     # torch.distributed.run
 | |
|     "config_from_args",
 | |
|     "determine_local_world_size",
 | |
|     "get_args_parser",
 | |
|     "get_rdzv_endpoint",
 | |
|     "get_use_env",
 | |
|     "main",
 | |
|     "parse_args",
 | |
|     "parse_min_max_nnodes",
 | |
|     "run",
 | |
|     "run_script_path",
 | |
|     # torch.distributions.constraints
 | |
|     "is_dependent",
 | |
|     # torch.hub
 | |
|     "import_module",
 | |
|     # torch.jit
 | |
|     "export_opnames",
 | |
|     # torch.jit.unsupported_tensor_ops
 | |
|     "execWrapper",
 | |
|     # torch.onnx
 | |
|     "unregister_custom_op_symbolic",
 | |
|     # torch.ao.quantization
 | |
|     "default_eval_fn",
 | |
|     # torch.backends
 | |
|     "disable_global_flags",
 | |
|     "flags_frozen",
 | |
|     # torch.distributed.algorithms.ddp_comm_hooks
 | |
|     "register_ddp_comm_hook",
 | |
|     # torch.nn.parallel
 | |
|     "DistributedDataParallelCPU",
 | |
|     # torch.utils
 | |
|     "set_module",
 | |
|     "burn_in_info",
 | |
|     "get_info_and_burn_skeleton",
 | |
|     "get_inline_skeleton",
 | |
|     "get_model_info",
 | |
|     "get_storage_info",
 | |
|     "hierarchical_pickle",
 | |
|     # torch.amp.autocast_mode
 | |
|     "autocast_decorator",
 | |
|     # torch.ao.nn.quantized.dynamic.modules.rnn
 | |
|     "apply_permutation",
 | |
|     "pack_weight_bias",
 | |
|     # torch.ao.nn.quantized.reference.modules.rnn
 | |
|     "get_quantized_weight",
 | |
|     # torch.ao.ns.fx.graph_matcher
 | |
|     "get_matching_subgraph_pairs",
 | |
|     # torch.ao.ns.fx.graph_passes
 | |
|     "add_loggers_to_model",
 | |
|     "create_a_shadows_b",
 | |
|     # torch.ao.ns.fx.mappings
 | |
|     "add_op_to_sets_of_related_ops",
 | |
|     "get_base_name_for_op",
 | |
|     "get_base_name_to_sets_of_related_ops",
 | |
|     "get_node_type_to_io_type_map",
 | |
|     "get_unmatchable_types_map",
 | |
|     # torch.ao.ns.fx.n_shadows_utils
 | |
|     "create_add_loggers_graph",
 | |
|     "create_n_transformed_and_logged_copies_of_subgraph",
 | |
|     "create_one_transformed_and_logged_copy_of_subgraph",
 | |
|     "create_results_comparison",
 | |
|     "create_submodule_from_subgraph",
 | |
|     "extract_weight_comparison",
 | |
|     "group_results_by_subgraph",
 | |
|     "print_n_shadows_summary",
 | |
|     # torch.ao.ns.fx.pattern_utils
 | |
|     "end_node_matches_reversed_fusion",
 | |
|     "get_reversed_fusions",
 | |
|     "get_type_a_related_to_b",
 | |
|     # torch.ao.ns.fx.utils
 | |
|     "get_arg_indices_of_inputs_to_log",
 | |
|     "get_node_first_input_and_output_type",
 | |
|     "get_node_input_qparams",
 | |
|     "get_normalized_nth_input",
 | |
|     "get_number_of_non_param_args",
 | |
|     "get_target_type_str",
 | |
|     "maybe_add_missing_fqns",
 | |
|     "maybe_dequantize_first_two_tensor_args_and_handle_tuples",
 | |
|     "op_type_supports_shadowing",
 | |
|     "rekey_logger_info_on_node_name_of_model",
 | |
|     "return_first_non_observer_node",
 | |
|     # torch.ao.ns.fx.weight_utils
 | |
|     "extract_weight_from_node",
 | |
|     "get_conv_fun_weight",
 | |
|     "get_conv_mod_weight",
 | |
|     "get_linear_fun_weight",
 | |
|     "get_linear_mod_weight",
 | |
|     "get_lstm_mod_weights",
 | |
|     "get_lstm_weight",
 | |
|     "get_op_to_type_to_weight_extraction_fn",
 | |
|     "get_qconv_fun_weight",
 | |
|     "get_qlinear_fun_weight",
 | |
|     "get_qlstm_weight",
 | |
|     "mod_0_weight_detach",
 | |
|     "mod_weight_bias_0",
 | |
|     "mod_weight_detach",
 | |
|     # torch.ao.pruning.sparsifier.utils
 | |
|     "fqn_to_module",
 | |
|     "get_arg_info_from_tensor_fqn",
 | |
|     "module_contains_param",
 | |
|     "module_to_fqn",
 | |
|     "swap_module",
 | |
|     # torch.ao.quantization.backend_config.executorch
 | |
|     "get_executorch_backend_config",
 | |
|     # torch.ao.quantization.backend_config.fbgemm
 | |
|     "get_fbgemm_backend_config",
 | |
|     # torch.ao.quantization.backend_config.native
 | |
|     "get_native_backend_config",
 | |
|     "get_native_backend_config_dict",
 | |
|     "get_test_only_legacy_native_backend_config",
 | |
|     "get_test_only_legacy_native_backend_config_dict",
 | |
|     # torch.ao.quantization.backend_config.onednn
 | |
|     "get_onednn_backend_config",
 | |
|     # torch.ao.quantization.backend_config.qnnpack
 | |
|     "get_qnnpack_backend_config",
 | |
|     # torch.ao.quantization.backend_config.tensorrt
 | |
|     "get_tensorrt_backend_config",
 | |
|     "get_tensorrt_backend_config_dict",
 | |
|     # torch.ao.quantization.backend_config.utils
 | |
|     "entry_to_pretty_str",
 | |
|     "get_fused_module_classes",
 | |
|     "get_fuser_method_mapping",
 | |
|     "get_fusion_pattern_to_extra_inputs_getter",
 | |
|     "get_fusion_pattern_to_root_node_getter",
 | |
|     "get_module_to_qat_module",
 | |
|     "get_pattern_to_dtype_configs",
 | |
|     "get_pattern_to_input_type_to_index",
 | |
|     "get_qat_module_classes",
 | |
|     "get_root_module_to_quantized_reference_module",
 | |
|     "pattern_to_human_readable",
 | |
|     "remove_boolean_dispatch_from_name",
 | |
|     # torch.ao.quantization.backend_config.x86
 | |
|     "get_x86_backend_config",
 | |
|     # torch.ao.quantization.fuse_modules
 | |
|     "fuse_known_modules",
 | |
|     "fuse_modules_qat",
 | |
|     # torch.ao.quantization.fuser_method_mappings
 | |
|     "fuse_conv_bn",
 | |
|     "fuse_conv_bn_relu",
 | |
|     "fuse_convtranspose_bn",
 | |
|     "fuse_linear_bn",
 | |
|     "get_fuser_method",
 | |
|     "get_fuser_method_new",
 | |
|     # torch.ao.quantization.fx.convert
 | |
|     "convert",
 | |
|     "convert_custom_module",
 | |
|     "convert_standalone_module",
 | |
|     "convert_weighted_module",
 | |
|     # torch.ao.quantization.fx.fuse
 | |
|     "fuse",
 | |
|     # torch.ao.quantization.fx.lower_to_fbgemm
 | |
|     "lower_to_fbgemm",
 | |
|     # torch.ao.quantization.fx.lower_to_qnnpack
 | |
|     "lower_to_qnnpack",
 | |
|     # torch.ao.quantization.fx.pattern_utils
 | |
|     "get_default_fusion_patterns",
 | |
|     "get_default_output_activation_post_process_map",
 | |
|     "get_default_quant_patterns",
 | |
|     # torch.ao.quantization.fx.prepare
 | |
|     "insert_observers_for_model",
 | |
|     "prepare",
 | |
|     "propagate_dtypes_for_known_nodes",
 | |
|     # torch.ao.quantization.fx.utils
 | |
|     "all_node_args_except_first",
 | |
|     "all_node_args_have_no_tensors",
 | |
|     "assert_and_get_unique_device",
 | |
|     "collect_producer_nodes",
 | |
|     "create_getattr_from_value",
 | |
|     "create_node_from_old_node_preserve_meta",
 | |
|     "get_custom_module_class_keys",
 | |
|     "get_linear_prepack_op_for_dtype",
 | |
|     "get_new_attr_name_with_prefix",
 | |
|     "get_non_observable_arg_indexes_and_types",
 | |
|     "get_qconv_prepack_op",
 | |
|     "get_skipped_module_name_and_classes",
 | |
|     "graph_module_from_producer_nodes",
 | |
|     "maybe_get_next_module",
 | |
|     "node_arg_is_bias",
 | |
|     "node_arg_is_weight",
 | |
|     "return_arg_list",
 | |
|     # torch.ao.quantization.pt2e.graph_utils
 | |
|     "bfs_trace_with_node_process",
 | |
|     "find_sequential_partitions",
 | |
|     "get_equivalent_types",
 | |
|     "update_equivalent_types_dict",
 | |
|     # torch.ao.quantization.pt2e.prepare
 | |
|     "prepare",
 | |
|     # torch.ao.quantization.pt2e.representation.rewrite
 | |
|     "reference_representation_rewrite",
 | |
|     # torch.ao.quantization.pt2e.utils
 | |
|     "fold_bn_weights_into_conv_node",
 | |
|     "remove_tensor_overload_for_qdq_ops",
 | |
|     # torch.ao.quantization.qconfig
 | |
|     "get_default_qat_qconfig",
 | |
|     "get_default_qat_qconfig_dict",
 | |
|     "get_default_qconfig",
 | |
|     "get_default_qconfig_dict",
 | |
|     "qconfig_equals",
 | |
|     # torch.ao.quantization.quantization_mappings
 | |
|     "get_default_dynamic_quant_module_mappings",
 | |
|     "get_default_dynamic_sparse_quant_module_mappings",
 | |
|     "get_default_float_to_quantized_operator_mappings",
 | |
|     "get_default_qat_module_mappings",
 | |
|     "get_default_qconfig_propagation_list",
 | |
|     "get_default_static_quant_module_mappings",
 | |
|     "get_default_static_quant_reference_module_mappings",
 | |
|     "get_default_static_sparse_quant_module_mappings",
 | |
|     "get_dynamic_quant_module_class",
 | |
|     "get_embedding_qat_module_mappings",
 | |
|     "get_embedding_static_quant_module_mappings",
 | |
|     "get_quantized_operator",
 | |
|     "get_static_quant_module_class",
 | |
|     "no_observer_set",
 | |
|     # torch.ao.quantization.quantize
 | |
|     "get_default_custom_config_dict",
 | |
|     # torch.ao.quantization.quantize_fx
 | |
|     "attach_preserved_attrs_to_model",
 | |
|     "convert_to_reference_fx",
 | |
|     # torch.ao.quantization.quantize_jit
 | |
|     "convert_dynamic_jit",
 | |
|     "convert_jit",
 | |
|     "fuse_conv_bn_jit",
 | |
|     "prepare_dynamic_jit",
 | |
|     "prepare_jit",
 | |
|     "quantize_dynamic_jit",
 | |
|     "quantize_jit",
 | |
|     "script_qconfig",
 | |
|     "script_qconfig_dict",
 | |
|     # torch.ao.quantization.quantize_pt2e
 | |
|     "convert_pt2e",
 | |
|     "prepare_pt2e",
 | |
|     "prepare_qat_pt2e",
 | |
|     # torch.ao.quantization.quantizer.embedding_quantizer
 | |
|     "get_embedding_operators_config",
 | |
|     # torch.ao.quantization.quantizer.xnnpack_quantizer_utils
 | |
|     "get_bias_qspec",
 | |
|     "get_input_act_qspec",
 | |
|     "get_output_act_qspec",
 | |
|     "get_weight_qspec",
 | |
|     "propagate_annotation",
 | |
|     "register_annotator",
 | |
|     "activation_dtype",
 | |
|     "check_node",
 | |
|     "has_no_children_ignoring_parametrizations",
 | |
|     "is_per_channel",
 | |
|     "is_per_tensor",
 | |
|     "op_is_int8_dynamically_quantized",
 | |
|     "to_underlying_dtype",
 | |
|     "weight_dtype",
 | |
|     "weight_is_quantized",
 | |
|     "weight_is_statically_quantized",
 | |
|     # torch.backends.cudnn.rnn
 | |
|     "get_cudnn_mode",
 | |
|     "init_dropout_state",
 | |
|     # torch.backends.xeon.run_cpu
 | |
|     "create_args",
 | |
|     # torch.cuda.amp.autocast_mode
 | |
|     "custom_bwd",
 | |
|     "custom_fwd",
 | |
|     # torch.cuda.amp.common
 | |
|     "amp_definitely_not_available",
 | |
|     # torch.mtia.memory
 | |
|     "reset_peak_memory_stats",
 | |
|     # torch.cuda.nccl
 | |
|     "all_gather",
 | |
|     "all_reduce",
 | |
|     "broadcast",
 | |
|     "init_rank",
 | |
|     "reduce",
 | |
|     "reduce_scatter",
 | |
|     "unique_id",
 | |
|     "version",
 | |
|     # torch.cuda.profiler
 | |
|     "init",
 | |
|     "profile",
 | |
|     "start",
 | |
|     "stop",
 | |
|     # torch.distributed.algorithms.ddp_comm_hooks.ddp_zero_hook
 | |
|     "hook_with_zero_step",
 | |
|     "hook_with_zero_step_interleaved",
 | |
|     # torch.distributed.algorithms.ddp_comm_hooks.post_localSGD_hook
 | |
|     "post_localSGD_hook",
 | |
|     # torch.distributed.algorithms.ddp_comm_hooks.quantization_hooks
 | |
|     "quantization_perchannel_hook",
 | |
|     "quantization_pertensor_hook",
 | |
|     # torch.distributed.algorithms.model_averaging.utils
 | |
|     "average_parameters",
 | |
|     "average_parameters_or_parameter_groups",
 | |
|     "get_params_to_average",
 | |
|     # torch.distributed.checkpoint.default_planner
 | |
|     "create_default_global_load_plan",
 | |
|     "create_default_global_save_plan",
 | |
|     "create_default_local_load_plan",
 | |
|     "create_default_local_save_plan",
 | |
|     # torch.distributed.checkpoint.optimizer
 | |
|     "load_sharded_optimizer_state_dict",
 | |
|     # torch.distributed.checkpoint.planner_helpers
 | |
|     "create_read_items_for_chunk_list",
 | |
|     # torch.distributed.checkpoint.state_dict_loader
 | |
|     "load_state_dict",
 | |
|     # torch.distributed.checkpoint.state_dict_saver
 | |
|     "save_state_dict",
 | |
|     # torch.distributed.checkpoint.utils
 | |
|     "find_state_dict_object",
 | |
|     "find_tensor_shard",
 | |
|     "isend",
 | |
|     "monitored_barrier",
 | |
|     "new_group",
 | |
|     "new_subgroups",
 | |
|     "new_subgroups_by_enumeration",
 | |
|     "recv",
 | |
|     "reduce",
 | |
|     "reduce_scatter",
 | |
|     "reduce_scatter_tensor",
 | |
|     "scatter",
 | |
|     "scatter_object_list",
 | |
|     "send",
 | |
|     "supports_complex",
 | |
|     # torch.distributed.elastic.events.handlers
 | |
|     "get_logging_handler",
 | |
|     # torch.distributed.elastic.metrics.api
 | |
|     "configure",
 | |
|     "getStream",
 | |
|     "get_elapsed_time_ms",
 | |
|     "prof",
 | |
|     "profile",
 | |
|     "publish_metric",
 | |
|     "put_metric",
 | |
|     # torch.distributed.elastic.multiprocessing.api
 | |
|     "get_std_cm",
 | |
|     "to_map",
 | |
|     # torch.distributed.elastic.multiprocessing.errors.handlers
 | |
|     "get_error_handler",
 | |
|     # torch.distributed.elastic.multiprocessing.redirects
 | |
|     "get_libc",
 | |
|     "redirect",
 | |
|     # torch.distributed.elastic.multiprocessing.tail_log
 | |
|     "tail_logfile",
 | |
|     # torch.distributed.elastic.rendezvous.dynamic_rendezvous
 | |
|     "get_method_name",
 | |
|     # torch.distributed.elastic.rendezvous.etcd_rendezvous
 | |
|     "create_rdzv_handler",
 | |
|     # torch.distributed.elastic.rendezvous.etcd_server
 | |
|     "find_free_port",
 | |
|     "stop_etcd",
 | |
|     # torch.distributed.elastic.rendezvous.etcd_store
 | |
|     "cas_delay",
 | |
|     # torch.distributed.elastic.rendezvous.static_tcp_rendezvous
 | |
|     "create_rdzv_handler",
 | |
|     # torch.distributed.elastic.rendezvous.utils
 | |
|     "parse_rendezvous_endpoint",
 | |
|     # torch.distributed.elastic.timer.api
 | |
|     "configure",
 | |
|     "expires",
 | |
|     # torch.distributed.elastic.utils.api
 | |
|     "get_env_variable_or_raise",
 | |
|     "get_socket_with_port",
 | |
|     # torch.distributed.elastic.utils.distributed
 | |
|     "create_c10d_store",
 | |
|     "get_free_port",
 | |
|     "get_socket_with_port",
 | |
|     # torch.distributed.elastic.utils.log_level
 | |
|     "get_log_level",
 | |
|     # torch.distributed.elastic.utils.logging
 | |
|     "get_logger",
 | |
|     # torch.distributed.elastic.utils.store
 | |
|     "barrier",
 | |
|     "get_all",
 | |
|     "synchronize",
 | |
|     "store_timeout",
 | |
|     # torch.distributed.fsdp.wrap
 | |
|     "always_wrap_policy",
 | |
|     "enable_wrap",
 | |
|     "lambda_auto_wrap_policy",
 | |
|     "size_based_auto_wrap_policy",
 | |
|     "transformer_auto_wrap_policy",
 | |
|     "wrap",
 | |
|     # torch.distributed.nn.functional
 | |
|     "all_to_all",
 | |
|     "all_to_all_single",
 | |
|     # torch.distributed.nn.jit.instantiator
 | |
|     "get_arg_return_types_from_interface",
 | |
|     "instantiate_non_scriptable_remote_module_template",
 | |
|     "instantiate_scriptable_remote_module_template",
 | |
|     # torch.distributed.nn.jit.templates.remote_module_template
 | |
|     "get_remote_module_template",
 | |
|     # torch.distributed.optim.utils
 | |
|     "as_functional_optim",
 | |
|     "register_functional_optim",
 | |
|     # torch.distributed.rendezvous
 | |
|     "register_rendezvous_handler",
 | |
|     "rendezvous",
 | |
|     # torch.distributed.rpc.api
 | |
|     "get_worker_info",
 | |
|     "method_factory",
 | |
|     "new_method",
 | |
|     "remote",
 | |
|     "rpc_async",
 | |
|     "rpc_sync",
 | |
|     "shutdown",
 | |
|     # torch.distributed.rpc.backend_registry
 | |
|     "backend_registered",
 | |
|     "construct_rpc_backend_options",
 | |
|     "init_backend",
 | |
|     "register_backend",
 | |
|     # torch.distributed.rpc.internal
 | |
|     "deserialize",
 | |
|     "serialize",
 | |
|     # torch.distributed.tensor.parallel.api
 | |
|     "parallelize_module",
 | |
|     # torch.distributed.tensor.parallel.input_reshard
 | |
|     "input_reshard",
 | |
|     # torch.distributed.tensor.parallel.loss
 | |
|     "loss_parallel",
 | |
|     # torch.distributed.tensor.parallel.style
 | |
|     "make_sharded_output_tensor",
 | |
|     # torch.distributions.utils
 | |
|     "broadcast_all",
 | |
|     "clamp_probs",
 | |
|     "logits_to_probs",
 | |
|     "probs_to_logits",
 | |
|     "tril_matrix_to_vec",
 | |
|     "vec_to_tril_matrix",
 | |
|     # torch.fx.annotate
 | |
|     "annotate",
 | |
|     # torch.fx.experimental.accelerator_partitioner
 | |
|     "check_dependency",
 | |
|     "combine_two_partitions",
 | |
|     "get_bfs_level_partition",
 | |
|     "get_device_partition_stats",
 | |
|     "get_device_to_partitions_mapping",
 | |
|     "get_logical_id_to_device",
 | |
|     "get_node_to_partition_mapping",
 | |
|     "reorganize_partitions",
 | |
|     "reset_partition_device",
 | |
|     "set_parents_and_children",
 | |
|     # torch.fx.experimental.const_fold
 | |
|     "get_unique_attr_name_in_module",
 | |
|     "split_const_subgraphs",
 | |
|     # torch.fx.experimental.debug
 | |
|     "set_trace",
 | |
|     # torch.fx.experimental.graph_gradual_typechecker
 | |
|     "adaptiveavgpool2d_check",
 | |
|     "adaptiveavgpool2d_inference_rule",
 | |
|     "add_inference_rule",
 | |
|     "all_eq",
 | |
|     "bn2d_inference_rule",
 | |
|     "broadcast_types",
 | |
|     "calculate_out_dimension",
 | |
|     "conv2d_inference_rule",
 | |
|     "conv_refinement_rule",
 | |
|     "conv_rule",
 | |
|     "element_wise_eq",
 | |
|     "expand_to_tensor_dim",
 | |
|     "first_two_eq",
 | |
|     "flatten_check",
 | |
|     "flatten_inference_rule",
 | |
|     "flatten_refinement_rule",
 | |
|     "get_attr_inference_rule",
 | |
|     "get_greatest_upper_bound",
 | |
|     "get_parameter",
 | |
|     "linear_check",
 | |
|     "linear_inference_rule",
 | |
|     "linear_refinement_rule",
 | |
|     "maxpool2d_check",
 | |
|     "maxpool2d_inference_rule",
 | |
|     "register_algebraic_expressions_inference_rule",
 | |
|     "register_inference_rule",
 | |
|     "register_refinement_rule",
 | |
|     "relu_inference_rule",
 | |
|     "reshape_inference_rule",
 | |
|     "transpose_inference_rule",
 | |
|     # torch.fx.experimental.merge_matmul
 | |
|     "are_nodes_independent",
 | |
|     "may_depend_on",
 | |
|     "merge_matmul",
 | |
|     "split_result_tensors",
 | |
|     # torch.fx.experimental.meta_tracer
 | |
|     "embedding_override",
 | |
|     "functional_relu_override",
 | |
|     "gen_constructor_wrapper",
 | |
|     "nn_layernorm_override",
 | |
|     "proxys_to_metas",
 | |
|     "symbolic_trace",
 | |
|     "torch_abs_override",
 | |
|     "torch_nn_relu_override",
 | |
|     "torch_relu_override",
 | |
|     "torch_where_override",
 | |
|     # torch.fx.experimental.migrate_gradual_types.constraint
 | |
|     "is_algebraic_expression",
 | |
|     "is_bool_expr",
 | |
|     "is_dim",
 | |
|     # torch.fx.experimental.migrate_gradual_types.constraint_generator
 | |
|     "adaptive_inference_rule",
 | |
|     "add_layer_norm_constraints",
 | |
|     "add_linear_constraints",
 | |
|     "arange_inference_rule",
 | |
|     "assert_inference_rule",
 | |
|     "batchnorm_inference_rule",
 | |
|     "bmm_inference_rule",
 | |
|     "broadcasting_inference_rule",
 | |
|     "conv2d_inference_rule",
 | |
|     "cumsum_inference_rule",
 | |
|     "embedding_inference_rule",
 | |
|     "embedding_inference_rule_functional",
 | |
|     "eq_inference_rule",
 | |
|     "equality_inference_rule",
 | |
|     "expand_inference_rule",
 | |
|     "flatten_inference_rule",
 | |
|     "full_inference_rule",
 | |
|     "gen_broadcasting_constraints",
 | |
|     "gen_embedding_rules",
 | |
|     "gen_layer_norm_constraints",
 | |
|     "generate_flatten_constraints",
 | |
|     "get_attr_inference_rule",
 | |
|     "getitem_inference_rule",
 | |
|     "gt_inference_rule",
 | |
|     "index_select_inference_rule",
 | |
|     "layer_norm_functional",
 | |
|     "layer_norm_inference_rule",
 | |
|     "linear_constraints",
 | |
|     "linear_inference_rule",
 | |
|     "lt_inference_rule",
 | |
|     "masked_fill_inference_rule",
 | |
|     "maxpool_inference_rule",
 | |
|     "neq_inference_rule",
 | |
|     "range_check",
 | |
|     "register_inference_rule",
 | |
|     "relu_inference_rule",
 | |
|     "reshape_inference_rule",
 | |
|     "size_inference_rule",
 | |
|     "tensor_inference_rule",
 | |
|     "torch_dim_inference_rule",
 | |
|     "torch_linear_inference_rule",
 | |
|     "transpose_inference_rule",
 | |
|     "type_inference_rule",
 | |
|     "view_inference_rule",
 | |
|     # torch.fx.experimental.migrate_gradual_types.constraint_transformation
 | |
|     "apply_padding",
 | |
|     "broadcast_dim",
 | |
|     "calc_last_two_dims",
 | |
|     "create_equality_constraints_for_broadcasting",
 | |
|     "gen_all_reshape_possibilities",
 | |
|     "gen_broadcasting_constraints",
 | |
|     "gen_consistency_constraints",
 | |
|     "gen_greatest_upper_bound",
 | |
|     "gen_lists_of_dims",
 | |
|     "generate_all_broadcasting_possibilities_no_padding",
 | |
|     "generate_all_int_dyn_dim_possibilities",
 | |
|     "generate_binconstraint_d",
 | |
|     "generate_binconstraint_t",
 | |
|     "generate_broadcasting",
 | |
|     "generate_calc_conv",
 | |
|     "generate_calc_maxpool",
 | |
|     "generate_calc_product",
 | |
|     "generate_conj",
 | |
|     "generate_d_gub",
 | |
|     "generate_disj",
 | |
|     "generate_gub",
 | |
|     "generate_reshape",
 | |
|     "is_dim_div_by_target",
 | |
|     "is_target_div_by_dim",
 | |
|     "no_broadcast_dim_with_index",
 | |
|     "register_transformation_rule",
 | |
|     "transform_constraint",
 | |
|     "transform_get_item",
 | |
|     "transform_get_item_tensor",
 | |
|     "transform_index_select",
 | |
|     "transform_transpose",
 | |
|     "valid_index",
 | |
|     "valid_index_tensor",
 | |
|     # torch.fx.experimental.migrate_gradual_types.transform_to_z3
 | |
|     "evaluate_conditional_with_constraints",
 | |
|     # torch.fx.experimental.migrate_gradual_types.util
 | |
|     "gen_bvar",
 | |
|     "gen_dvar",
 | |
|     "gen_nat_constraints",
 | |
|     "gen_tensor_dims",
 | |
|     "gen_tvar",
 | |
|     # torch.fx.experimental.optimization
 | |
|     "extract_subgraph",
 | |
|     "fuse",
 | |
|     "gen_mkl_autotuner",
 | |
|     "matches_module_pattern",
 | |
|     "modules_to_mkldnn",
 | |
|     "optimize_for_inference",
 | |
|     "remove_dropout",
 | |
|     "replace_node_module",
 | |
|     "reset_modules",
 | |
|     "use_mkl_length",
 | |
|     # torch.fx.experimental.partitioner_utils
 | |
|     "get_comm_latency_between",
 | |
|     "get_extra_size_of",
 | |
|     "get_latency_of_one_partition",
 | |
|     "get_latency_of_partitioned_graph",
 | |
|     "get_partition_to_latency_mapping",
 | |
|     # torch.fx.experimental.proxy_tensor
 | |
|     "decompose",
 | |
|     "disable_autocast_cache",
 | |
|     "disable_proxy_modes_tracing",
 | |
|     "dispatch_trace",
 | |
|     "extract_val",
 | |
|     "fake_signature",
 | |
|     "fetch_sym_proxy",
 | |
|     "fetch_object_proxy",
 | |
|     "get_innermost_proxy_mode",
 | |
|     "get_isolated_graphmodule",
 | |
|     "get_proxy_slot",
 | |
|     "get_torch_dispatch_modes",
 | |
|     "has_proxy_slot",
 | |
|     "is_sym_node",
 | |
|     "maybe_handle_decomp",
 | |
|     "proxy_call",
 | |
|     "set_meta",
 | |
|     "set_original_aten_op",
 | |
|     "set_proxy_slot",
 | |
|     "snapshot_fake",
 | |
|     "thunkify",
 | |
|     "track_tensor",
 | |
|     "track_tensor_tree",
 | |
|     "wrap_key",
 | |
|     "wrapper_and_args_for_make_fx",
 | |
|     # torch.fx.experimental.recording
 | |
|     "record_shapeenv_event",
 | |
|     "replay_shape_env_events",
 | |
|     "shape_env_check_state_equal",
 | |
|     # torch.fx.experimental.sym_node
 | |
|     "ceil_impl",
 | |
|     "floor_ceil_helper",
 | |
|     "floor_impl",
 | |
|     "method_to_operator",
 | |
|     "sympy_is_channels_last_contiguous_2d",
 | |
|     "sympy_is_channels_last_contiguous_3d",
 | |
|     "sympy_is_channels_last_strides_2d",
 | |
|     "sympy_is_channels_last_strides_3d",
 | |
|     "sympy_is_channels_last_strides_generic",
 | |
|     "sympy_is_contiguous",
 | |
|     "sympy_is_contiguous_generic",
 | |
|     "to_node",
 | |
|     "wrap_node",
 | |
|     "sym_sqrt",
 | |
|     # torch.fx.experimental.symbolic_shapes
 | |
|     "bind_symbols",
 | |
|     "cast_symbool_to_symint_guardless",
 | |
|     "create_contiguous",
 | |
|     "error",
 | |
|     "eval_guards",
 | |
|     "eval_is_non_overlapping_and_dense",
 | |
|     "expect_true",
 | |
|     "find_symbol_binding_fx_nodes",
 | |
|     "free_symbols",
 | |
|     "free_unbacked_symbols",
 | |
|     "fx_placeholder_targets",
 | |
|     "fx_placeholder_vals",
 | |
|     "guard_bool",
 | |
|     "guard_float",
 | |
|     "guard_int",
 | |
|     "guard_scalar",
 | |
|     "has_hint",
 | |
|     "has_symbolic_sizes_strides",
 | |
|     "is_channels_last_contiguous_2d",
 | |
|     "is_channels_last_contiguous_3d",
 | |
|     "is_channels_last_strides_2d",
 | |
|     "is_channels_last_strides_3d",
 | |
|     "is_contiguous",
 | |
|     "is_non_overlapping_and_dense_indicator",
 | |
|     "is_nested_int",
 | |
|     "is_symbol_binding_fx_node",
 | |
|     "is_symbolic",
 | |
|     # torch.fx.experimental.unification.core
 | |
|     "reify",
 | |
|     # torch.fx.experimental.unification.match
 | |
|     "edge",
 | |
|     "match",
 | |
|     "ordering",
 | |
|     "supercedes",
 | |
|     # torch.fx.experimental.unification.more
 | |
|     "reify_object",
 | |
|     "unifiable",
 | |
|     "unify_object",
 | |
|     # torch.fx.experimental.unification.multipledispatch.conflict
 | |
|     "ambiguities",
 | |
|     "ambiguous",
 | |
|     "consistent",
 | |
|     "edge",
 | |
|     "ordering",
 | |
|     "super_signature",
 | |
|     "supercedes",
 | |
|     # torch.fx.experimental.unification.multipledispatch.core
 | |
|     "dispatch",
 | |
|     "ismethod",
 | |
|     # torch.fx.experimental.unification.multipledispatch.dispatcher
 | |
|     "ambiguity_warn",
 | |
|     "halt_ordering",
 | |
|     "restart_ordering",
 | |
|     "source",
 | |
|     "str_signature",
 | |
|     "variadic_signature_matches",
 | |
|     "variadic_signature_matches_iter",
 | |
|     "warning_text",
 | |
|     # torch.fx.experimental.unification.multipledispatch.utils
 | |
|     "expand_tuples",
 | |
|     "groupby",
 | |
|     "raises",
 | |
|     "reverse_dict",
 | |
|     # torch.fx.experimental.unification.multipledispatch.variadic
 | |
|     "isvariadic",
 | |
|     # torch.fx.experimental.unification.unification_tools
 | |
|     "assoc",
 | |
|     "assoc_in",
 | |
|     "dissoc",
 | |
|     "first",
 | |
|     "get_in",
 | |
|     "getter",
 | |
|     "groupby",
 | |
|     "itemfilter",
 | |
|     "itemmap",
 | |
|     "keyfilter",
 | |
|     "keymap",
 | |
|     "merge",
 | |
|     "merge_with",
 | |
|     "update_in",
 | |
|     "valfilter",
 | |
|     "valmap",
 | |
|     # torch.fx.experimental.unification.utils
 | |
|     "freeze",
 | |
|     "hashable",
 | |
|     "raises",
 | |
|     "reverse_dict",
 | |
|     "transitive_get",
 | |
|     "xfail",
 | |
|     # torch.fx.experimental.unification.variable
 | |
|     "var",
 | |
|     "vars",
 | |
|     # torch.fx.experimental.unify_refinements
 | |
|     "check_for_type_equality",
 | |
|     "convert_eq",
 | |
|     "infer_symbolic_types",
 | |
|     "infer_symbolic_types_single_pass",
 | |
|     "substitute_all_types",
 | |
|     "substitute_solution_one_type",
 | |
|     "unify_eq",
 | |
|     # torch.fx.experimental.validator
 | |
|     "bisect",
 | |
|     "translation_validation_enabled",
 | |
|     "translation_validation_timeout",
 | |
|     "z3op",
 | |
|     "z3str",
 | |
|     # torch.fx.graph_module
 | |
|     "reduce_graph_module",
 | |
|     "reduce_package_graph_module",
 | |
|     # torch.fx.node
 | |
|     "has_side_effect",
 | |
|     "map_aggregate",
 | |
|     "map_arg",
 | |
|     # torch.fx.operator_schemas
 | |
|     "check_for_mutable_operation",
 | |
|     "create_type_hint",
 | |
|     "get_signature_for_torch_op",
 | |
|     "normalize_function",
 | |
|     "normalize_module",
 | |
|     "type_matches",
 | |
|     # torch.fx.passes.annotate_getitem_nodes
 | |
|     "annotate_getitem_nodes",
 | |
|     # torch.fx.passes.backends.cudagraphs
 | |
|     "partition_cudagraphs",
 | |
|     # torch.fx.passes.dialect.common.cse_pass
 | |
|     "get_CSE_banned_ops",
 | |
|     # torch.fx.passes.graph_manipulation
 | |
|     "get_size_of_all_nodes",
 | |
|     "get_size_of_node",
 | |
|     "get_tensor_meta",
 | |
|     "replace_target_nodes_with",
 | |
|     # torch.fx.passes.infra.pass_manager
 | |
|     "pass_result_wrapper",
 | |
|     "this_before_that_pass_constraint",
 | |
|     # torch.fx.passes.operator_support
 | |
|     "any_chain",
 | |
|     "chain",
 | |
|     "create_op_support",
 | |
|     # torch.fx.passes.param_fetch
 | |
|     "default_matching",
 | |
|     "extract_attrs_for_lowering",
 | |
|     "lift_lowering_attrs_to_nodes",
 | |
|     # torch.fx.passes.pass_manager
 | |
|     "inplace_wrapper",
 | |
|     "log_hook",
 | |
|     "loop_pass",
 | |
|     "these_before_those_pass_constraint",
 | |
|     "this_before_that_pass_constraint",
 | |
|     # torch.fx.passes.regional_inductor
 | |
|     "regional_inductor",
 | |
|     # torch.fx.passes.reinplace
 | |
|     "reinplace",
 | |
|     # torch.fx.passes.split_module
 | |
|     "split_module",
 | |
|     # torch.fx.passes.split_utils
 | |
|     "getattr_recursive",
 | |
|     "setattr_recursive",
 | |
|     "split_by_tags",
 | |
|     # torch.fx.passes.splitter_base
 | |
|     "generate_inputs_for_submodules",
 | |
|     # torch.fx.passes.tools_common
 | |
|     "get_acc_ops_name",
 | |
|     "get_node_target",
 | |
|     "is_node_output_tensor",
 | |
|     "legalize_graph",
 | |
|     # torch.fx.passes.utils.common
 | |
|     "compare_graphs",
 | |
|     "lift_subgraph_as_module",
 | |
|     # torch.fx.passes.utils.fuser_utils
 | |
|     "erase_nodes",
 | |
|     "fuse_as_graphmodule",
 | |
|     "fuse_by_partitions",
 | |
|     "insert_subgm",
 | |
|     "topo_sort",
 | |
|     "validate_partition",
 | |
|     # torch.fx.passes.utils.source_matcher_utils
 | |
|     "check_subgraphs_connected",
 | |
|     "get_source_partitions",
 | |
|     # torch.fx.proxy
 | |
|     "assert_fn",
 | |
|     # torch.fx.subgraph_rewriter
 | |
|     "replace_pattern",
 | |
|     "replace_pattern_with_filters",
 | |
|     # torch.fx.tensor_type
 | |
|     "is_consistent",
 | |
|     "is_more_precise",
 | |
|     # torch.fx.traceback
 | |
|     "format_stack",
 | |
|     "get_current_meta",
 | |
|     "has_preserved_node_meta",
 | |
|     "preserve_node_meta",
 | |
|     "reset_grad_fn_seq_nr",
 | |
|     "set_current_meta",
 | |
|     "set_grad_fn_seq_nr",
 | |
|     "set_stack_trace",
 | |
|     # torch.jit.annotations
 | |
|     "ann_to_type",
 | |
|     "check_fn",
 | |
|     "get_enum_value_type",
 | |
|     "get_param_names",
 | |
|     "get_signature",
 | |
|     "get_type_line",
 | |
|     "is_function_or_method",
 | |
|     "is_tensor",
 | |
|     "is_vararg",
 | |
|     "parse_type_line",
 | |
|     "split_type_line",
 | |
|     "try_ann_to_type",
 | |
|     "try_real_annotations",
 | |
|     # torch.jit.frontend
 | |
|     "build_class_def",
 | |
|     "build_def",
 | |
|     "build_ignore_context_manager",
 | |
|     "build_param",
 | |
|     "build_param_list",
 | |
|     "build_stmts",
 | |
|     "build_withitems",
 | |
|     "find_before",
 | |
|     "get_class_assigns",
 | |
|     "get_class_properties",
 | |
|     "get_default_args",
 | |
|     "get_default_args_for_class",
 | |
|     "get_jit_class_def",
 | |
|     "get_jit_def",
 | |
|     "is_reserved_name",
 | |
|     "is_torch_jit_ignore_context_manager",
 | |
|     # torch.jit.generate_bytecode
 | |
|     "format_bytecode",
 | |
|     "generate_upgraders_bytecode",
 | |
|     # torch.jit.quantized
 | |
|     "apply_permutation",
 | |
|     "quantize_linear_modules",
 | |
|     "quantize_rnn_cell_modules",
 | |
|     "quantize_rnn_modules",
 | |
|     # torch.library
 | |
|     "define",
 | |
|     "get_ctx",
 | |
|     "impl",
 | |
|     "impl_abstract",
 | |
|     # torch.masked.maskedtensor.core
 | |
|     "is_masked_tensor",
 | |
|     # torch.masked.maskedtensor.creation
 | |
|     "as_masked_tensor",
 | |
|     "masked_tensor",
 | |
|     # torch.multiprocessing.pool
 | |
|     "clean_worker",
 | |
|     # torch.multiprocessing.reductions
 | |
|     "fd_id",
 | |
|     "init_reductions",
 | |
|     "rebuild_cuda_tensor",
 | |
|     "rebuild_meta_tensor",
 | |
|     "rebuild_event",
 | |
|     "rebuild_nested_tensor",
 | |
|     "rebuild_sparse_coo_tensor",
 | |
|     "rebuild_sparse_compressed_tensor",
 | |
|     "rebuild_storage_empty",
 | |
|     "rebuild_storage_fd",
 | |
|     "rebuild_storage_filename",
 | |
|     "rebuild_tensor",
 | |
|     "rebuild_typed_storage",
 | |
|     "rebuild_typed_storage_child",
 | |
|     "reduce_event",
 | |
|     "reduce_storage",
 | |
|     "reduce_tensor",
 | |
|     "reduce_typed_storage",
 | |
|     "reduce_typed_storage_child",
 | |
|     "storage_from_cache",
 | |
|     # torch.multiprocessing.spawn
 | |
|     "start_processes",
 | |
|     # torch.nn.functional
 | |
|     "adaptive_max_pool1d_with_indices",  # documented as adaptive_max_pool1d
 | |
|     "adaptive_max_pool2d_with_indices",  # documented as adaptive_max_pool2d
 | |
|     "adaptive_max_pool3d_with_indices",  # documented as adaptive_max_pool3d
 | |
|     "assert_int_or_pair",  # looks unintentionally public
 | |
|     "fractional_max_pool2d_with_indices",  # documented as fractional_max_pool2d
 | |
|     "fractional_max_pool3d_with_indices",  # documented as fractional_max_pool3d
 | |
|     "max_pool1d_with_indices",  # documented as max_pool1d
 | |
|     "max_pool2d_with_indices",  # documented as max_pool2d
 | |
|     "max_pool3d_with_indices",  # documented as max_pool3d
 | |
|     "multi_head_attention_forward",
 | |
|     # torch.nn.grad
 | |
|     "conv1d_input",  # legacy helper for gradient computation
 | |
|     "conv1d_weight",  # legacy helper for gradient computation
 | |
|     "conv2d_input",  # legacy helper for gradient computation
 | |
|     "conv2d_weight",  # legacy helper for gradient computation
 | |
|     "conv3d_input",  # legacy helper for gradient computation
 | |
|     "conv3d_weight",  # legacy helper for gradient computation
 | |
|     # torch.nn.init
 | |
|     "constant",  # deprecated
 | |
|     "dirac",  # deprecated
 | |
|     "eye",  # deprecated
 | |
|     "kaiming_normal",  # deprecated
 | |
|     "kaiming_uniform",  # deprecated
 | |
|     "normal",  # deprecated
 | |
|     "orthogonal",  # deprecated
 | |
|     "sparse",  # deprecated
 | |
|     "uniform",  # deprecated
 | |
|     "xavier_normal",  # deprecated
 | |
|     "xavier_uniform",  # deprecated
 | |
|     # torch.nn.modules.rnn
 | |
|     "apply_permutation",  # deprecated
 | |
|     # torch.nn.modules.utils
 | |
|     "consume_prefix_in_state_dict_if_present",
 | |
|     # torch.nn.parallel.comm
 | |
|     "broadcast",
 | |
|     "broadcast_coalesced",
 | |
|     "gather",
 | |
|     "reduce_add",
 | |
|     "reduce_add_coalesced",
 | |
|     "scatter",
 | |
|     # torch.nn.parallel.data_parallel
 | |
|     "data_parallel",
 | |
|     # torch.nn.parallel.parallel_apply
 | |
|     "get_a_var",
 | |
|     "parallel_apply",
 | |
|     # torch.nn.parallel.replicate
 | |
|     "replicate",
 | |
|     # torch.nn.parallel.scatter_gather
 | |
|     "gather",
 | |
|     "is_namedtuple",
 | |
|     "scatter",
 | |
|     "scatter_kwargs",
 | |
|     # torch.nn.utils.rnn
 | |
|     "bind",  # looks unintentionally public
 | |
|     # torch.onnx.operators
 | |
|     "reshape_from_tensor_shape",
 | |
|     "shape_as_tensor",
 | |
|     # torch.onnx.symbolic_caffe2
 | |
|     "add",
 | |
|     "avg_pool2d",
 | |
|     "cat",
 | |
|     "conv2d",
 | |
|     "conv2d_relu",
 | |
|     "conv_prepack",
 | |
|     "dequantize",
 | |
|     "linear",
 | |
|     "linear_prepack",
 | |
|     "max_pool2d",
 | |
|     "nchw2nhwc",
 | |
|     "nhwc2nchw",
 | |
|     "quantize_per_tensor",
 | |
|     "register_quantized_ops",
 | |
|     "relu",
 | |
|     "reshape",
 | |
|     "sigmoid",
 | |
|     "slice",
 | |
|     "upsample_nearest2d",
 | |
|     # torch.onnx.symbolic_helper
 | |
|     "args_have_same_dtype",
 | |
|     "check_training_mode",
 | |
|     "dequantize_helper",
 | |
|     "is_complex_value",
 | |
|     "quantize_helper",
 | |
|     "quantized_args",
 | |
|     "requantize_bias_helper",
 | |
|     # torch.onnx.symbolic_opset10
 | |
|     "dequantize",
 | |
|     "div",
 | |
|     "embedding_bag",
 | |
|     "fake_quantize_per_tensor_affine",
 | |
|     "flip",
 | |
|     "fmod",
 | |
|     "isfinite",
 | |
|     "isinf",
 | |
|     "nan_to_num",
 | |
|     "quantize_per_tensor",
 | |
|     "quantized_add",
 | |
|     "quantized_add_relu",
 | |
|     "quantized_cat",
 | |
|     "quantized_conv1d",
 | |
|     "quantized_conv1d_relu",
 | |
|     "quantized_conv2d",
 | |
|     "quantized_conv2d_relu",
 | |
|     "quantized_conv3d",
 | |
|     "quantized_conv3d_relu",
 | |
|     "quantized_conv_transpose1d",
 | |
|     "quantized_conv_transpose2d",
 | |
|     "quantized_conv_transpose3d",
 | |
|     "quantized_group_norm",
 | |
|     "quantized_hardswish",
 | |
|     "quantized_instance_norm",
 | |
|     "quantized_layer_norm",
 | |
|     "quantized_leaky_relu",
 | |
|     "quantized_linear",
 | |
|     "quantized_linear_relu",
 | |
|     "quantized_mul",
 | |
|     "quantized_sigmoid",
 | |
|     "slice",
 | |
|     "sort",
 | |
|     "topk",
 | |
|     # torch.onnx.symbolic_opset11
 | |
|     "Delete",
 | |
|     "add",
 | |
|     "append",
 | |
|     "arange",
 | |
|     "argsort",
 | |
|     "atleast_1d",
 | |
|     "atleast_2d",
 | |
|     "atleast_3d",
 | |
|     "cat",
 | |
|     "chunk",
 | |
|     "clamp",
 | |
|     "clamp_max",
 | |
|     "clamp_min",
 | |
|     "constant_pad_nd",
 | |
|     "cumsum",
 | |
|     "embedding_bag",
 | |
|     "embedding_renorm",
 | |
|     "flatten",
 | |
|     "gather",
 | |
|     "hardtanh",
 | |
|     "hstack",
 | |
|     "im2col",
 | |
|     "index",
 | |
|     "index_copy",
 | |
|     "index_fill",
 | |
|     "index_put",
 | |
|     "insert",
 | |
|     "linalg_det",
 | |
|     "linalg_vector_norm",
 | |
|     "logdet",
 | |
|     "masked_scatter",
 | |
|     "masked_select",
 | |
|     "mm",
 | |
|     "narrow",
 | |
|     "normal",
 | |
|     "pad",
 | |
|     "pixel_shuffle",
 | |
|     "pop",
 | |
|     "prim_constant_chunk",
 | |
|     "reflection_pad",
 | |
|     "relu6",
 | |
|     "remainder",
 | |
|     "replication_pad",
 | |
|     "round",
 | |
|     "scatter",
 | |
|     "select",
 | |
|     "size",
 | |
|     "sort",
 | |
|     "split",
 | |
|     "split_with_sizes",
 | |
|     "squeeze",
 | |
|     "stack",
 | |
|     "topk",
 | |
|     "unbind",
 | |
|     "unique_dim",
 | |
|     "unsqueeze",
 | |
|     "vstack",
 | |
|     # torch.onnx.symbolic_opset12
 | |
|     "argmax",
 | |
|     "argmin",
 | |
|     "binary_cross_entropy_with_logits",
 | |
|     "celu",
 | |
|     "cross_entropy_loss",
 | |
|     "dropout",
 | |
|     "einsum",
 | |
|     "ge",
 | |
|     "le",
 | |
|     "native_dropout",
 | |
|     "nll_loss",
 | |
|     "nll_loss2d",
 | |
|     "nll_loss_nd",
 | |
|     "outer",
 | |
|     "pow",
 | |
|     "tensordot",
 | |
|     "unfold",
 | |
|     # torch.onnx.symbolic_opset13
 | |
|     "diagonal",
 | |
|     "fake_quantize_per_channel_affine",
 | |
|     "fake_quantize_per_tensor_affine",
 | |
|     "frobenius_norm",
 | |
|     "log_softmax",
 | |
|     "nonzero_numpy",
 | |
|     "quantized_conv1d",
 | |
|     "quantized_conv1d_relu",
 | |
|     "quantized_conv2d",
 | |
|     "quantized_conv2d_relu",
 | |
|     "quantized_conv3d",
 | |
|     "quantized_conv3d_relu",
 | |
|     "quantized_conv_transpose1d",
 | |
|     "quantized_conv_transpose2d",
 | |
|     "quantized_conv_transpose3d",
 | |
|     "quantized_linear",
 | |
|     "quantized_linear_relu",
 | |
|     "repeat_interleave",
 | |
|     "softmax",
 | |
|     "split",
 | |
|     "split_with_sizes",
 | |
|     "tensor_split",
 | |
|     "tile",
 | |
|     "unbind",
 | |
|     "unflatten",
 | |
|     "unsafe_chunk",
 | |
|     "unsafe_split",
 | |
|     "unsafe_split_with_sizes",
 | |
|     "where",
 | |
|     # torch.onnx.symbolic_opset14
 | |
|     "batch_norm",
 | |
|     "hardswish",
 | |
|     "quantized_hardswish",
 | |
|     "reshape",
 | |
|     "scaled_dot_product_attention",
 | |
|     "tril",
 | |
|     "triu",
 | |
|     # torch.onnx.symbolic_opset15
 | |
|     "aten__is_",
 | |
|     "aten__isnot_",
 | |
|     "bernoulli",
 | |
|     "prim_unchecked_cast",
 | |
|     # torch.onnx.symbolic_opset16
 | |
|     "grid_sampler",
 | |
|     "scatter_add",
 | |
|     "scatter_reduce",
 | |
|     # torch.onnx.symbolic_opset17
 | |
|     "layer_norm",
 | |
|     "stft",
 | |
|     # torch.onnx.symbolic_opset18
 | |
|     "col2im",
 | |
|     # torch.onnx.symbolic_opset7
 | |
|     "max",
 | |
|     "min",
 | |
|     # torch.onnx.symbolic_opset8
 | |
|     "addmm",
 | |
|     "bmm",
 | |
|     "empty",
 | |
|     "empty_like",
 | |
|     "flatten",
 | |
|     "full",
 | |
|     "full_like",
 | |
|     "gt",
 | |
|     "lt",
 | |
|     "matmul",
 | |
|     "mm",
 | |
|     "ones",
 | |
|     "ones_like",
 | |
|     "prelu",
 | |
|     "repeat",
 | |
|     "zeros",
 | |
|     "zeros_like",
 | |
|     # torch.onnx.symbolic_opset9
 | |
|     "abs",
 | |
|     "acos",
 | |
|     "adaptive_avg_pool1d",
 | |
|     "adaptive_avg_pool2d",
 | |
|     "adaptive_avg_pool3d",
 | |
|     "adaptive_max_pool1d",
 | |
|     "adaptive_max_pool2d",
 | |
|     "adaptive_max_pool3d",
 | |
|     "add",
 | |
|     "addcmul",
 | |
|     "addmm",
 | |
|     "alias",
 | |
|     "amax",
 | |
|     "amin",
 | |
|     "aminmax",
 | |
|     "arange",
 | |
|     "argmax",
 | |
|     "argmin",
 | |
|     "as_strided",
 | |
|     "as_tensor",
 | |
|     "asin",
 | |
|     "atan",
 | |
|     "atan2",
 | |
|     "avg_pool1d",
 | |
|     "avg_pool2d",
 | |
|     "avg_pool3d",
 | |
|     "baddbmm",
 | |
|     "batch_norm",
 | |
|     "bernoulli",
 | |
|     "bitwise_not",
 | |
|     "bitwise_or",
 | |
|     "bmm",
 | |
|     "broadcast_tensors",
 | |
|     "broadcast_to",
 | |
|     "bucketize",
 | |
|     "cat",
 | |
|     "cdist",
 | |
|     "ceil",
 | |
|     "clamp",
 | |
|     "clamp_max",
 | |
|     "clamp_min",
 | |
|     "clone",
 | |
|     "constant_pad_nd",
 | |
|     "contiguous",
 | |
|     "conv1d",
 | |
|     "conv2d",
 | |
|     "conv3d",
 | |
|     "conv_tbc",
 | |
|     "conv_transpose1d",
 | |
|     "conv_transpose2d",
 | |
|     "conv_transpose3d",
 | |
|     "convert_element_type",
 | |
|     "convolution",
 | |
|     "cos",
 | |
|     "cosine_similarity",
 | |
|     "cross",
 | |
|     "cumsum",
 | |
|     "detach",
 | |
|     "dim",
 | |
|     "div",
 | |
|     "dot",
 | |
|     "dropout",
 | |
|     "elu",
 | |
|     "embedding",
 | |
|     "embedding_bag",
 | |
|     "empty",
 | |
|     "empty_like",
 | |
|     "eq",
 | |
|     "erf",
 | |
|     "exp",
 | |
|     "expand",
 | |
|     "expand_as",
 | |
|     "eye",
 | |
|     "fill",
 | |
|     "flatten",
 | |
|     "floor",
 | |
|     "floor_divide",
 | |
|     "floordiv",
 | |
|     "frobenius_norm",
 | |
|     "full",
 | |
|     "full_like",
 | |
|     "gather",
 | |
|     "ge",
 | |
|     "gelu",
 | |
|     "get_pool_ceil_padding",
 | |
|     "glu",
 | |
|     "group_norm",
 | |
|     "gru",
 | |
|     "gt",
 | |
|     "hann_window",
 | |
|     "hardshrink",
 | |
|     "hardsigmoid",
 | |
|     "hardswish",
 | |
|     "hardtanh",
 | |
|     "index",
 | |
|     "index_add",
 | |
|     "index_copy",
 | |
|     "index_fill",
 | |
|     "index_put",
 | |
|     "index_select",
 | |
|     "instance_norm",
 | |
|     "is_floating_point",
 | |
|     "is_pinned",
 | |
|     "isnan",
 | |
|     "item",
 | |
|     "kl_div",
 | |
|     "layer_norm",
 | |
|     "le",
 | |
|     "leaky_relu",
 | |
|     "lerp",
 | |
|     "lift",
 | |
|     "linalg_cross",
 | |
|     "linalg_matrix_norm",
 | |
|     "linalg_norm",
 | |
|     "linalg_vector_norm",
 | |
|     "linear",
 | |
|     "linspace",
 | |
|     "log",
 | |
|     "log10",
 | |
|     "log1p",
 | |
|     "log2",
 | |
|     "log_sigmoid",
 | |
|     "log_softmax",
 | |
|     "logical_and",
 | |
|     "logical_not",
 | |
|     "logical_or",
 | |
|     "logical_xor",
 | |
|     "logit",
 | |
|     "logsumexp",
 | |
|     "lstm",
 | |
|     "lstm_cell",
 | |
|     "lt",
 | |
|     "masked_fill",
 | |
|     "masked_fill_",
 | |
|     "matmul",
 | |
|     "max",
 | |
|     "max_pool1d",
 | |
|     "max_pool1d_with_indices",
 | |
|     "max_pool2d",
 | |
|     "max_pool2d_with_indices",
 | |
|     "max_pool3d",
 | |
|     "max_pool3d_with_indices",
 | |
|     "maximum",
 | |
|     "meshgrid",
 | |
|     "min",
 | |
|     "minimum",
 | |
|     "mish",
 | |
|     "mm",
 | |
|     "movedim",
 | |
|     "mse_loss",
 | |
|     "mul",
 | |
|     "multinomial",
 | |
|     "mv",
 | |
|     "narrow",
 | |
|     "native_layer_norm",
 | |
|     "ne",
 | |
|     "neg",
 | |
|     "new_empty",
 | |
|     "new_full",
 | |
|     "new_ones",
 | |
|     "new_zeros",
 | |
|     "nonzero",
 | |
|     "nonzero_numpy",
 | |
|     "noop_complex_operators",
 | |
|     "norm",
 | |
|     "numel",
 | |
|     "numpy_T",
 | |
|     "one_hot",
 | |
|     "ones",
 | |
|     "ones_like",
 | |
|     "onnx_placeholder",
 | |
|     "overload_by_arg_count",
 | |
|     "pad",
 | |
|     "pairwise_distance",
 | |
|     "permute",
 | |
|     "pixel_shuffle",
 | |
|     "pixel_unshuffle",
 | |
|     "pow",
 | |
|     "prelu",
 | |
|     "prim_constant",
 | |
|     "prim_constant_chunk",
 | |
|     "prim_constant_split",
 | |
|     "prim_data",
 | |
|     "prim_device",
 | |
|     "prim_dtype",
 | |
|     "prim_if",
 | |
|     "prim_layout",
 | |
|     "prim_list_construct",
 | |
|     "prim_list_unpack",
 | |
|     "prim_loop",
 | |
|     "prim_max",
 | |
|     "prim_min",
 | |
|     "prim_shape",
 | |
|     "prim_tolist",
 | |
|     "prim_tuple_construct",
 | |
|     "prim_type",
 | |
|     "prim_unchecked_cast",
 | |
|     "prim_uninitialized",
 | |
|     "rand",
 | |
|     "rand_like",
 | |
|     "randint",
 | |
|     "randint_like",
 | |
|     "randn",
 | |
|     "randn_like",
 | |
|     "reciprocal",
 | |
|     "reflection_pad",
 | |
|     "relu",
 | |
|     "relu6",
 | |
|     "remainder",
 | |
|     "repeat",
 | |
|     "repeat_interleave",
 | |
|     "replication_pad",
 | |
|     "reshape",
 | |
|     "reshape_as",
 | |
|     "rnn_relu",
 | |
|     "rnn_tanh",
 | |
|     "roll",
 | |
|     "rrelu",
 | |
|     "rsqrt",
 | |
|     "rsub",
 | |
|     "scalar_tensor",
 | |
|     "scatter",
 | |
|     "scatter_add",
 | |
|     "select",
 | |
|     "selu",
 | |
|     "sigmoid",
 | |
|     "sign",
 | |
|     "silu",
 | |
|     "sin",
 | |
|     "size",
 | |
|     "slice",
 | |
|     "softmax",
 | |
|     "softplus",
 | |
|     "softshrink",
 | |
|     "sort",
 | |
|     "split",
 | |
|     "split_with_sizes",
 | |
|     "sqrt",
 | |
|     "square",
 | |
|     "squeeze",
 | |
|     "stack",
 | |
|     "std",
 | |
|     "std_mean",
 | |
|     "sub",
 | |
|     "t",
 | |
|     "take",
 | |
|     "tan",
 | |
|     "tanh",
 | |
|     "tanhshrink",
 | |
|     "tensor",
 | |
|     "threshold",
 | |
|     "to",
 | |
|     "topk",
 | |
|     "transpose",
 | |
|     "true_divide",
 | |
|     "type_as",
 | |
|     "unbind",
 | |
|     "unfold",
 | |
|     "unsafe_chunk",
 | |
|     "unsafe_split",
 | |
|     "unsafe_split_with_sizes",
 | |
|     "unsqueeze",
 | |
|     "unsupported_complex_operators",
 | |
|     "unused",
 | |
|     "upsample_bilinear2d",
 | |
|     "upsample_linear1d",
 | |
|     "upsample_nearest1d",
 | |
|     "upsample_nearest2d",
 | |
|     "upsample_nearest3d",
 | |
|     "upsample_trilinear3d",
 | |
|     "var",
 | |
|     "var_mean",
 | |
|     "view",
 | |
|     "view_as",
 | |
|     "where",
 | |
|     "wrap_logical_op_with_cast_to",
 | |
|     "wrap_logical_op_with_negation",
 | |
|     "zero",
 | |
|     "zeros",
 | |
|     "zeros_like",
 | |
|     # torch.onnx.utils
 | |
|     "disable_apex_o2_state_dict_hook",
 | |
|     "export",
 | |
|     "export_to_pretty_string",
 | |
|     "exporter_context",
 | |
|     "is_in_onnx_export",
 | |
|     "model_signature",
 | |
|     "register_custom_op_symbolic",
 | |
|     "select_model_mode_for_export",
 | |
|     "setup_onnx_logging",
 | |
|     "unconvertible_ops",
 | |
|     "unpack_quantized_tensor",
 | |
|     "warn_on_static_input_change",
 | |
|     # torch.onnx.verification
 | |
|     "check_export_model_diff",
 | |
|     "verify",
 | |
|     "verify_aten_graph",
 | |
|     # torch.optim.optimizer
 | |
|     "register_optimizer_step_post_hook",
 | |
|     "register_optimizer_step_pre_hook",
 | |
|     # torch.overrides
 | |
|     "enable_reentrant_dispatch",
 | |
|     # torch.package.analyze.find_first_use_of_broken_modules
 | |
|     "find_first_use_of_broken_modules",
 | |
|     # torch.package.analyze.is_from_package
 | |
|     "is_from_package",
 | |
|     # torch.package.analyze.trace_dependencies
 | |
|     "trace_dependencies",
 | |
|     # torch.profiler.itt
 | |
|     "range",
 | |
|     # torch.profiler.profiler
 | |
|     "schedule",
 | |
|     "supported_activities",
 | |
|     "tensorboard_trace_handler",
 | |
|     # torch.return_types
 | |
|     "pytree_register_structseq",
 | |
|     # torch.serialization
 | |
|     "check_module_version_greater_or_equal",
 | |
|     "default_restore_location",
 | |
|     "load",
 | |
|     "location_tag",
 | |
|     "mkdtemp",
 | |
|     "normalize_storage_type",
 | |
|     "save",
 | |
|     "storage_to_tensor_type",
 | |
|     "validate_cuda_device",
 | |
|     "validate_hpu_device",
 | |
|     # torch.signal.windows.windows
 | |
|     "bartlett",
 | |
|     "blackman",
 | |
|     "cosine",
 | |
|     "exponential",
 | |
|     "gaussian",
 | |
|     "general_cosine",
 | |
|     "general_hamming",
 | |
|     "hamming",
 | |
|     "hann",
 | |
|     "kaiser",
 | |
|     "nuttall",
 | |
|     # torch.sparse.semi_structured
 | |
|     "to_sparse_semi_structured",
 | |
|     # torch.utils.backend_registration
 | |
|     "generate_methods_for_privateuse1_backend",
 | |
|     "rename_privateuse1_backend",
 | |
|     # torch.utils.benchmark.examples.op_benchmark
 | |
|     "assert_dicts_equal",
 | |
|     # torch.utils.benchmark.op_fuzzers.spectral
 | |
|     "power_range",
 | |
|     # torch.utils.benchmark.utils.common
 | |
|     "ordered_unique",
 | |
|     "select_unit",
 | |
|     "set_torch_threads",
 | |
|     "trim_sigfig",
 | |
|     "unit_to_english",
 | |
|     # torch.utils.benchmark.utils.compare
 | |
|     "optional_min",
 | |
|     # torch.utils.benchmark.utils.compile
 | |
|     "bench_all",
 | |
|     "bench_loop",
 | |
|     "benchmark_compile",
 | |
|     # torch.utils.benchmark.utils.cpp_jit
 | |
|     "compile_callgrind_template",
 | |
|     "compile_timeit_template",
 | |
|     "get_compat_bindings",
 | |
|     # torch.utils.benchmark.utils.fuzzer
 | |
|     "dtype_size",
 | |
|     "prod",
 | |
|     # torch.utils.benchmark.utils.timer
 | |
|     "timer",
 | |
|     # torch.utils.benchmark.utils.valgrind_wrapper.timer_interface
 | |
|     "wrapper_singleton",
 | |
|     # torch.utils.bundled_inputs
 | |
|     "augment_many_model_functions_with_bundled_inputs",
 | |
|     "augment_model_with_bundled_inputs",
 | |
|     "bundle_inputs",
 | |
|     "bundle_large_tensor",
 | |
|     "bundle_randn",
 | |
|     # torch.utils.checkpoint
 | |
|     "check_backward_validity",
 | |
|     "detach_variable",
 | |
|     "get_device_states",
 | |
|     "noop_context_fn",
 | |
|     "set_checkpoint_early_stop",
 | |
|     "set_device_states",
 | |
|     # torch.utils.collect_env
 | |
|     "check_release_file",
 | |
|     "get_cachingallocator_config",
 | |
|     "get_clang_version",
 | |
|     "get_cmake_version",
 | |
|     "get_conda_packages",
 | |
|     "get_cpu_info",
 | |
|     "get_cuda_module_loading_config",
 | |
|     "get_cudnn_version",
 | |
|     "get_env_info",
 | |
|     "get_gcc_version",
 | |
|     "get_gpu_info",
 | |
|     "get_libc_version",
 | |
|     "get_lsb_version",
 | |
|     "get_mac_version",
 | |
|     "get_nvidia_driver_version",
 | |
|     "get_nvidia_smi",
 | |
|     "get_os",
 | |
|     "get_pip_packages",
 | |
|     "get_platform",
 | |
|     "get_pretty_env_info",
 | |
|     "get_python_platform",
 | |
|     "get_running_cuda_version",
 | |
|     "get_windows_version",
 | |
|     "is_xnnpack_available",
 | |
|     "pretty_str",
 | |
|     # torch.utils.cpp_backtrace
 | |
|     "get_cpp_backtrace",
 | |
|     # torch.utils.cpp_extension
 | |
|     "check_compiler_is_gcc",
 | |
|     "check_compiler_ok_for_platform",
 | |
|     "get_cxx_compiler",
 | |
|     "get_default_build_root",
 | |
|     "library_paths",
 | |
|     "remove_extension_h_precompiler_headers",
 | |
|     # torch.utils.data.backward_compatibility
 | |
|     "worker_init_fn",
 | |
|     # torch.utils.data.datapipes.dataframe.dataframe_wrapper
 | |
|     "concat",
 | |
|     "create_dataframe",
 | |
|     "get_columns",
 | |
|     "get_df_wrapper",
 | |
|     "get_item",
 | |
|     "get_len",
 | |
|     "is_column",
 | |
|     "is_dataframe",
 | |
|     "iterate",
 | |
|     "set_df_wrapper",
 | |
|     # torch.utils.data.datapipes.dataframe.dataframes
 | |
|     "disable_capture",
 | |
|     "get_val",
 | |
|     # torch.utils.data.datapipes.gen_pyi
 | |
|     "extract_class_name",
 | |
|     "extract_method_name",
 | |
|     "find_file_paths",
 | |
|     "gen_from_template",
 | |
|     "get_method_definitions",
 | |
|     "materialize_lines",
 | |
|     "parse_datapipe_file",
 | |
|     "parse_datapipe_files",
 | |
|     "process_signature",
 | |
|     "split_outside_bracket",
 | |
|     # torch.utils.data.datapipes.map.callable
 | |
|     "default_fn",
 | |
|     # torch.utils.data.datapipes.utils.common
 | |
|     "get_file_binaries_from_pathnames",
 | |
|     "get_file_pathnames_from_root",
 | |
|     "match_masks",
 | |
|     "validate_input_col",
 | |
|     "validate_pathname_binary_tuple",
 | |
|     # torch.utils.data.datapipes.utils.decoder
 | |
|     "audiohandler",
 | |
|     "basichandlers",
 | |
|     "extension_extract_fn",
 | |
|     "handle_extension",
 | |
|     "imagehandler",
 | |
|     "mathandler",
 | |
|     "videohandler",
 | |
|     # torch.utils.data.dataset
 | |
|     "random_split",
 | |
|     # torch.utils.data.graph
 | |
|     "traverse",
 | |
|     "traverse_dps",
 | |
|     # torch.utils.data.graph_settings
 | |
|     "apply_random_seed",
 | |
|     "apply_sharding",
 | |
|     "apply_shuffle_seed",
 | |
|     "apply_shuffle_settings",
 | |
|     "get_all_graph_pipes",
 | |
|     # torch.utils.flop_counter
 | |
|     "addmm_flop",
 | |
|     "baddbmm_flop",
 | |
|     "bmm_flop",
 | |
|     "conv_backward_flop",
 | |
|     "conv_flop",
 | |
|     "conv_flop_count",
 | |
|     "convert_num_with_suffix",
 | |
|     "get_shape",
 | |
|     "get_suffix_str",
 | |
|     "mm_flop",
 | |
|     "normalize_tuple",
 | |
|     "register_flop_formula",
 | |
|     "sdpa_backward_flop",
 | |
|     "sdpa_backward_flop_count",
 | |
|     "sdpa_flop",
 | |
|     "sdpa_flop_count",
 | |
|     "shape_wrapper",
 | |
|     "transpose_shape",
 | |
|     # torch.utils.hipify.hipify_python
 | |
|     "add_dim3",
 | |
|     "compute_stats",
 | |
|     "extract_arguments",
 | |
|     "file_add_header",
 | |
|     "file_specific_replacement",
 | |
|     "find_bracket_group",
 | |
|     "find_closure_group",
 | |
|     "find_parentheses_group",
 | |
|     "fix_static_global_kernels",
 | |
|     "get_hip_file_path",
 | |
|     "hip_header_magic",
 | |
|     "hipify",
 | |
|     "is_caffe2_gpu_file",
 | |
|     "is_cusparse_file",
 | |
|     "is_out_of_place",
 | |
|     "is_pytorch_file",
 | |
|     "is_special_file",
 | |
|     "match_extensions",
 | |
|     "matched_files_iter",
 | |
|     "openf",
 | |
|     "preprocess_file_and_save_result",
 | |
|     "preprocessor",
 | |
|     "processKernelLaunches",
 | |
|     "replace_extern_shared",
 | |
|     "replace_math_functions",
 | |
|     "str2bool",
 | |
|     # torch.utils.hooks
 | |
|     "unserializable_hook",
 | |
|     "warn_if_has_hooks",
 | |
|     # torch.utils.jit.log_extract
 | |
|     "extract_ir",
 | |
|     "load_graph_and_inputs",
 | |
|     "make_tensor_from_type",
 | |
|     "no_fuser",
 | |
|     "time_cpu",
 | |
|     "time_cuda",
 | |
|     # torch.utils.mkldnn
 | |
|     "to_mkldnn",
 | |
|     # torch.utils.mobile_optimizer
 | |
|     "generate_mobile_module_lints",
 | |
|     # torch.utils.tensorboard.summary
 | |
|     "audio",
 | |
|     "compute_curve",
 | |
|     "custom_scalars",
 | |
|     "draw_boxes",
 | |
|     "half_to_int",
 | |
|     "histogram",
 | |
|     "histogram_raw",
 | |
|     "hparams",
 | |
|     "image",
 | |
|     "image_boxes",
 | |
|     "int_to_half",
 | |
|     "make_histogram",
 | |
|     "make_image",
 | |
|     "make_video",
 | |
|     "mesh",
 | |
|     "pr_curve",
 | |
|     "pr_curve_raw",
 | |
|     "scalar",
 | |
|     "tensor_proto",
 | |
|     "text",
 | |
|     "video",
 | |
|     # torch.utils.throughput_benchmark
 | |
|     "format_time",
 | |
| ]
 | |
| 
 | |
| coverage_ignore_classes = [
 | |
|     # torch
 | |
|     "FatalError",
 | |
|     "QUInt2x4Storage",
 | |
|     "Size",
 | |
|     "Storage",
 | |
|     "Stream",
 | |
|     "Tensor",
 | |
|     "finfo",
 | |
|     "iinfo",
 | |
|     "qscheme",
 | |
|     "AggregationType",
 | |
|     "AliasDb",
 | |
|     "AnyType",
 | |
|     "Argument",
 | |
|     "ArgumentSpec",
 | |
|     "AwaitType",
 | |
|     "BenchmarkConfig",
 | |
|     "BenchmarkExecutionStats",
 | |
|     "Block",
 | |
|     "BoolType",
 | |
|     "BufferDict",
 | |
|     "CallStack",
 | |
|     "Capsule",
 | |
|     "ClassType",
 | |
|     "Code",
 | |
|     "CompleteArgumentSpec",
 | |
|     "ComplexType",
 | |
|     "ConcreteModuleType",
 | |
|     "ConcreteModuleTypeBuilder",
 | |
|     "DeepCopyMemoTable",
 | |
|     "DeserializationStorageContext",
 | |
|     "DeviceObjType",
 | |
|     "DictType",
 | |
|     "DispatchKey",
 | |
|     "DispatchKeySet",
 | |
|     "EnumType",
 | |
|     "ExcludeDispatchKeyGuard",
 | |
|     "ExecutionPlan",
 | |
|     "FileCheck",
 | |
|     "FloatType",
 | |
|     "FunctionSchema",
 | |
|     "Gradient",
 | |
|     "Graph",
 | |
|     "GraphExecutorState",
 | |
|     "IODescriptor",
 | |
|     "InferredType",
 | |
|     "IntType",
 | |
|     "InterfaceType",
 | |
|     "ListType",
 | |
|     "LockingLogger",
 | |
|     "MobileOptimizerType",
 | |
|     "ModuleDict",
 | |
|     "Node",
 | |
|     "NoneType",
 | |
|     "NoopLogger",
 | |
|     "NumberType",
 | |
|     "OperatorInfo",
 | |
|     "OptionalType",
 | |
|     "ParameterDict",
 | |
|     "PyObjectType",
 | |
|     "PyTorchFileReader",
 | |
|     "PyTorchFileWriter",
 | |
|     "RRefType",
 | |
|     "ScriptClass",
 | |
|     "ScriptClassFunction",
 | |
|     "ScriptDict",
 | |
|     "ScriptDictIterator",
 | |
|     "ScriptDictKeyIterator",
 | |
|     "ScriptList",
 | |
|     "ScriptListIterator",
 | |
|     "ScriptMethod",
 | |
|     "ScriptModule",
 | |
|     "ScriptModuleSerializer",
 | |
|     "ScriptObject",
 | |
|     "ScriptObjectProperty",
 | |
|     "SerializationStorageContext",
 | |
|     "StaticModule",
 | |
|     "StringType",
 | |
|     "SymIntType",
 | |
|     "SymBoolType",
 | |
|     "ThroughputBenchmark",
 | |
|     "TracingState",
 | |
|     "TupleType",
 | |
|     "Type",
 | |
|     "UnionType",
 | |
|     "Use",
 | |
|     "Value",
 | |
|     # torch.cuda
 | |
|     "BFloat16Storage",
 | |
|     "BFloat16Tensor",
 | |
|     "BoolStorage",
 | |
|     "BoolTensor",
 | |
|     "ByteStorage",
 | |
|     "ByteTensor",
 | |
|     "CharStorage",
 | |
|     "CharTensor",
 | |
|     "ComplexDoubleStorage",
 | |
|     "ComplexFloatStorage",
 | |
|     "CudaError",
 | |
|     "DeferredCudaCallError",
 | |
|     "DoubleStorage",
 | |
|     "DoubleTensor",
 | |
|     "FloatStorage",
 | |
|     "FloatTensor",
 | |
|     "HalfStorage",
 | |
|     "HalfTensor",
 | |
|     "IntStorage",
 | |
|     "IntTensor",
 | |
|     "LongStorage",
 | |
|     "LongTensor",
 | |
|     "ShortStorage",
 | |
|     "ShortTensor",
 | |
|     "cudaStatus",
 | |
|     # torch.cuda._sanitizer
 | |
|     "Access",
 | |
|     "AccessType",
 | |
|     "Await",
 | |
|     "CUDASanitizer",
 | |
|     "CUDASanitizerDispatchMode",
 | |
|     "CUDASanitizerErrors",
 | |
|     "EventHandler",
 | |
|     "SynchronizationError",
 | |
|     "UnsynchronizedAccessError",
 | |
|     # torch.distributed.elastic.multiprocessing.errors
 | |
|     "ChildFailedError",
 | |
|     "ProcessFailure",
 | |
|     # torch.distributions.constraints
 | |
|     "cat",
 | |
|     "greater_than",
 | |
|     "greater_than_eq",
 | |
|     "half_open_interval",
 | |
|     "independent",
 | |
|     "integer_interval",
 | |
|     "interval",
 | |
|     "less_than",
 | |
|     "multinomial",
 | |
|     "stack",
 | |
|     # torch.distributions.transforms
 | |
|     "AffineTransform",
 | |
|     "CatTransform",
 | |
|     "ComposeTransform",
 | |
|     "CorrCholeskyTransform",
 | |
|     "CumulativeDistributionTransform",
 | |
|     "ExpTransform",
 | |
|     "IndependentTransform",
 | |
|     "PowerTransform",
 | |
|     "ReshapeTransform",
 | |
|     "SigmoidTransform",
 | |
|     "SoftmaxTransform",
 | |
|     "SoftplusTransform",
 | |
|     "StackTransform",
 | |
|     "StickBreakingTransform",
 | |
|     "TanhTransform",
 | |
|     "Transform",
 | |
|     # torch.jit
 | |
|     "CompilationUnit",
 | |
|     "Error",
 | |
|     "Future",
 | |
|     "ScriptFunction",
 | |
|     # torch.onnx
 | |
|     "CheckerError",
 | |
|     "ExportTypes",
 | |
|     # torch.backends
 | |
|     "ContextProp",
 | |
|     "PropModule",
 | |
|     # torch.backends.cuda
 | |
|     "cuBLASModule",
 | |
|     "cuFFTPlanCache",
 | |
|     "cuFFTPlanCacheAttrContextProp",
 | |
|     "cuFFTPlanCacheManager",
 | |
|     # torch.distributed.algorithms.ddp_comm_hooks
 | |
|     "DDPCommHookType",
 | |
|     # torch.jit.mobile
 | |
|     "LiteScriptModule",
 | |
|     # torch.ao.nn.quantized.modules
 | |
|     "DeQuantize",
 | |
|     "Quantize",
 | |
|     # torch.utils.backcompat
 | |
|     "Warning",
 | |
|     # torch.ao.nn.intrinsic.modules.fused
 | |
|     "ConvAdd2d",
 | |
|     "ConvAddReLU2d",
 | |
|     "LinearBn1d",
 | |
|     "LinearLeakyReLU",
 | |
|     "LinearTanh",
 | |
|     # torch.ao.nn.intrinsic.qat.modules.conv_fused
 | |
|     "ConvBnReLU1d",
 | |
|     "ConvBnReLU2d",
 | |
|     "ConvBnReLU3d",
 | |
|     "ConvReLU1d",
 | |
|     "ConvReLU2d",
 | |
|     "ConvReLU3d",
 | |
|     # torch.ao.nn.intrinsic.qat.modules.linear_fused
 | |
|     "LinearBn1d",
 | |
|     # torch.ao.nn.intrinsic.qat.modules.linear_relu
 | |
|     "LinearReLU",
 | |
|     # torch.ao.nn.intrinsic.quantized.dynamic.modules.linear_relu
 | |
|     "LinearReLU",
 | |
|     # torch.ao.nn.intrinsic.quantized.modules.bn_relu
 | |
|     "BNReLU2d",
 | |
|     "BNReLU3d",
 | |
|     # torch.ao.nn.intrinsic.quantized.modules.conv_add
 | |
|     "ConvAdd2d",
 | |
|     "ConvAddReLU2d",
 | |
|     # torch.ao.nn.intrinsic.quantized.modules.conv_relu
 | |
|     "ConvReLU1d",
 | |
|     "ConvReLU2d",
 | |
|     "ConvReLU3d",
 | |
|     # torch.ao.nn.intrinsic.quantized.modules.linear_relu
 | |
|     "LinearLeakyReLU",
 | |
|     "LinearReLU",
 | |
|     "LinearTanh",
 | |
|     # torch.ao.nn.qat.modules.conv
 | |
|     "Conv1d",
 | |
|     "Conv2d",
 | |
|     "Conv3d",
 | |
|     # torch.ao.nn.qat.modules.embedding_ops
 | |
|     "Embedding",
 | |
|     "EmbeddingBag",
 | |
|     # torch.ao.nn.qat.modules.linear
 | |
|     "Linear",
 | |
|     # torch.ao.nn.quantizable.modules.activation
 | |
|     "MultiheadAttention",
 | |
|     # torch.ao.nn.quantizable.modules.rnn
 | |
|     "LSTM",
 | |
|     "LSTMCell",
 | |
|     # torch.ao.nn.quantized.dynamic.modules.conv
 | |
|     "Conv1d",
 | |
|     "Conv2d",
 | |
|     "Conv3d",
 | |
|     "ConvTranspose1d",
 | |
|     "ConvTranspose2d",
 | |
|     "ConvTranspose3d",
 | |
|     # torch.ao.nn.quantized.dynamic.modules.linear
 | |
|     "Linear",
 | |
|     # torch.ao.nn.quantized.dynamic.modules.rnn
 | |
|     "GRU",
 | |
|     "GRUCell",
 | |
|     "LSTM",
 | |
|     "LSTMCell",
 | |
|     "PackedParameter",
 | |
|     "RNNBase",
 | |
|     "RNNCell",
 | |
|     "RNNCellBase",
 | |
|     # torch.ao.nn.quantized.modules.activation
 | |
|     "ELU",
 | |
|     "Hardswish",
 | |
|     "LeakyReLU",
 | |
|     "MultiheadAttention",
 | |
|     "PReLU",
 | |
|     "ReLU6",
 | |
|     "Sigmoid",
 | |
|     "Softmax",
 | |
|     # torch.ao.nn.quantized.modules.batchnorm
 | |
|     "BatchNorm2d",
 | |
|     "BatchNorm3d",
 | |
|     # torch.ao.nn.quantized.modules.conv
 | |
|     "Conv1d",
 | |
|     "Conv2d",
 | |
|     "Conv3d",
 | |
|     "ConvTranspose1d",
 | |
|     "ConvTranspose2d",
 | |
|     "ConvTranspose3d",
 | |
|     # torch.ao.nn.quantized.modules.dropout
 | |
|     "Dropout",
 | |
|     # torch.ao.nn.quantized.modules.embedding_ops
 | |
|     "Embedding",
 | |
|     "EmbeddingBag",
 | |
|     "EmbeddingPackedParams",
 | |
|     # torch.ao.nn.quantized.modules.functional_modules
 | |
|     "FXFloatFunctional",
 | |
|     "FloatFunctional",
 | |
|     "QFunctional",
 | |
|     # torch.ao.nn.quantized.modules.linear
 | |
|     "Linear",
 | |
|     "LinearPackedParams",
 | |
|     # torch.ao.nn.quantized.modules.normalization
 | |
|     "GroupNorm",
 | |
|     "InstanceNorm1d",
 | |
|     "InstanceNorm2d",
 | |
|     "InstanceNorm3d",
 | |
|     "LayerNorm",
 | |
|     # torch.ao.nn.quantized.modules.rnn
 | |
|     "LSTM",
 | |
|     # torch.ao.nn.quantized.modules.utils
 | |
|     "WeightedQuantizedModule",
 | |
|     # torch.ao.nn.quantized.reference.modules.conv
 | |
|     "Conv1d",
 | |
|     "Conv2d",
 | |
|     "Conv3d",
 | |
|     "ConvTranspose1d",
 | |
|     "ConvTranspose2d",
 | |
|     "ConvTranspose3d",
 | |
|     # torch.ao.nn.quantized.reference.modules.linear
 | |
|     "Linear",
 | |
|     # torch.ao.nn.quantized.reference.modules.rnn
 | |
|     "GRU",
 | |
|     "GRUCell",
 | |
|     "LSTM",
 | |
|     "LSTMCell",
 | |
|     "RNNBase",
 | |
|     "RNNCell",
 | |
|     "RNNCellBase",
 | |
|     # torch.ao.nn.quantized.reference.modules.sparse
 | |
|     "Embedding",
 | |
|     "EmbeddingBag",
 | |
|     # torch.ao.nn.quantized.reference.modules.utils
 | |
|     "ReferenceQuantizedModule",
 | |
|     # torch.ao.nn.sparse.quantized.dynamic.linear
 | |
|     "Linear",
 | |
|     # torch.ao.nn.sparse.quantized.linear
 | |
|     "Linear",
 | |
|     "LinearPackedParams",
 | |
|     # torch.ao.nn.sparse.quantized.utils
 | |
|     "LinearBlockSparsePattern",
 | |
|     # torch.ao.ns.fx.graph_matcher
 | |
|     "SubgraphTypeRelationship",
 | |
|     # torch.ao.ns.fx.n_shadows_utils
 | |
|     "OutputProp",
 | |
|     # torch.ao.ns.fx.ns_types
 | |
|     "NSSingleResultValuesType",
 | |
|     "NSSubgraph",
 | |
|     # torch.ao.ns.fx.qconfig_multi_mapping
 | |
|     "QConfigMultiMapping",
 | |
|     # torch.ao.pruning.scheduler.base_scheduler
 | |
|     "BaseScheduler",
 | |
|     # torch.ao.pruning.scheduler.cubic_scheduler
 | |
|     "CubicSL",
 | |
|     # torch.ao.pruning.scheduler.lambda_scheduler
 | |
|     "LambdaSL",
 | |
|     # torch.ao.pruning.sparsifier.base_sparsifier
 | |
|     "BaseSparsifier",
 | |
|     # torch.ao.pruning.sparsifier.nearly_diagonal_sparsifier
 | |
|     "NearlyDiagonalSparsifier",
 | |
|     # torch.ao.pruning.sparsifier.utils
 | |
|     "FakeSparsity",
 | |
|     # torch.ao.pruning.sparsifier.weight_norm_sparsifier
 | |
|     "WeightNormSparsifier",
 | |
|     # torch.ao.quantization.backend_config.backend_config
 | |
|     "BackendConfig",
 | |
|     "BackendPatternConfig",
 | |
|     "DTypeConfig",
 | |
|     # torch.ao.quantization.fake_quantize
 | |
|     "FakeQuantize",
 | |
|     "FakeQuantizeBase",
 | |
|     "FixedQParamsFakeQuantize",
 | |
|     "FusedMovingAvgObsFakeQuantize",
 | |
|     # torch.ao.quantization.fx.fuse_handler
 | |
|     "DefaultFuseHandler",
 | |
|     "FuseHandler",
 | |
|     # torch.ao.quantization.fx.graph_module
 | |
|     "FusedGraphModule",
 | |
|     "ObservedGraphModule",
 | |
|     "ObservedStandaloneGraphModule",
 | |
|     # torch.ao.quantization.fx.quantize_handler
 | |
|     "BatchNormQuantizeHandler",
 | |
|     "BinaryOpQuantizeHandler",
 | |
|     "CatQuantizeHandler",
 | |
|     "ConvReluQuantizeHandler",
 | |
|     "CopyNodeQuantizeHandler",
 | |
|     "CustomModuleQuantizeHandler",
 | |
|     "DefaultNodeQuantizeHandler",
 | |
|     "EmbeddingQuantizeHandler",
 | |
|     "FixedQParamsOpQuantizeHandler",
 | |
|     "GeneralTensorShapeOpQuantizeHandler",
 | |
|     "LinearReLUQuantizeHandler",
 | |
|     "RNNDynamicQuantizeHandler",
 | |
|     "StandaloneModuleQuantizeHandler",
 | |
|     # torch.ao.quantization.fx.tracer
 | |
|     "QuantizationTracer",
 | |
|     "ScopeContextManager",
 | |
|     # torch.ao.quantization.fx.utils
 | |
|     "ObservedGraphModuleAttrs",
 | |
|     # torch.ao.quantization.observer
 | |
|     "FixedQParamsObserver",
 | |
|     "HistogramObserver",
 | |
|     "MinMaxObserver",
 | |
|     "MovingAverageMinMaxObserver",
 | |
|     "MovingAveragePerChannelMinMaxObserver",
 | |
|     "NoopObserver",
 | |
|     "ObserverBase",
 | |
|     "PerChannelMinMaxObserver",
 | |
|     "PlaceholderObserver",
 | |
|     "RecordingObserver",
 | |
|     "ReuseInputObserver",
 | |
|     "UniformQuantizationObserverBase",
 | |
|     "default_debug_observer",
 | |
|     "default_placeholder_observer",
 | |
|     "default_reuse_input_observer",
 | |
|     # torch.ao.quantization.pt2e.duplicate_dq_pass
 | |
|     "DuplicateDQPass",
 | |
|     # torch.ao.quantization.pt2e.port_metadata_pass
 | |
|     "PortNodeMetaForQDQ",
 | |
|     # torch.ao.quantization.qconfig
 | |
|     "QConfigDynamic",
 | |
|     # torch.ao.quantization.quant_type
 | |
|     "QuantType",
 | |
|     # torch.ao.quantization.quantizer.composable_quantizer
 | |
|     "ComposableQuantizer",
 | |
|     # torch.ao.quantization.quantizer.embedding_quantizer
 | |
|     "EmbeddingQuantizer",
 | |
|     # torch.ao.quantization.quantizer.quantizer
 | |
|     "DerivedQuantizationSpec",
 | |
|     "FixedQParamsQuantizationSpec",
 | |
|     "QuantizationAnnotation",
 | |
|     "QuantizationSpec",
 | |
|     "QuantizationSpecBase",
 | |
|     "SharedQuantizationSpec",
 | |
|     # torch.ao.quantization.quantizer.x86_inductor_quantizer
 | |
|     "X86InductorQuantizer",
 | |
|     # torch.ao.quantization.quantizer.xpu_inductor_quantizer
 | |
|     "XPUInductorQuantizer",
 | |
|     # torch.ao.quantization.quantizer.xnnpack_quantizer
 | |
|     "XNNPACKQuantizer",
 | |
|     # torch.ao.quantization.quantizer.xnnpack_quantizer_utils
 | |
|     "OperatorConfig",
 | |
|     "QuantizationConfig",
 | |
|     # torch.ao.quantization.stubs
 | |
|     "DeQuantStub",
 | |
|     "QuantStub",
 | |
|     "QuantWrapper",
 | |
|     # torch.ao.quantization.utils
 | |
|     "MatchAllNode",
 | |
|     # torch.backends.cudnn.rnn
 | |
|     "Unserializable",
 | |
|     # torch.amp.grad_scaler
 | |
|     "GradScaler",
 | |
|     "OptState",
 | |
|     # torch.distributed.algorithms.ddp_comm_hooks.post_localSGD_hook
 | |
|     "PostLocalSGDState",
 | |
|     # torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook
 | |
|     "PowerSGDState",
 | |
|     # torch.distributed.algorithms.join
 | |
|     "Join",
 | |
|     "JoinHook",
 | |
|     "Joinable",
 | |
|     # torch.distributed.algorithms.model_averaging.averagers
 | |
|     "ModelAverager",
 | |
|     "PeriodicModelAverager",
 | |
|     # torch.distributed.algorithms.model_averaging.hierarchical_model_averager
 | |
|     "HierarchicalModelAverager",
 | |
|     # torch.distributed.argparse_util
 | |
|     "check_env",
 | |
|     "env",
 | |
|     # torch.distributed.checkpoint.api
 | |
|     "CheckpointException",
 | |
|     # torch.distributed.checkpoint.default_planner
 | |
|     "DefaultLoadPlanner",
 | |
|     "DefaultSavePlanner",
 | |
|     # torch.distributed.checkpoint.filesystem
 | |
|     "FileSystemReader",
 | |
|     "FileSystemWriter",
 | |
|     # torch.distributed.checkpoint.hf_storage
 | |
|     "HuggingFaceStorageReader",
 | |
|     "HuggingFaceStorageWriter",
 | |
|     # torch.distributed.checkpoint.quantized_hf_storage
 | |
|     "QuantizedHuggingFaceStorageReader",
 | |
|     # torch.distributed.checkpoint.metadata
 | |
|     "BytesStorageMetadata",
 | |
|     "ChunkStorageMetadata",
 | |
|     "Metadata",
 | |
|     "MetadataIndex",
 | |
|     # torch.distributed.checkpoint.planner
 | |
|     "LoadItemType",
 | |
|     "LoadPlanner",
 | |
|     "SavePlanner",
 | |
|     "WriteItemType",
 | |
|     # torch.distributed.checkpoint.state_dict
 | |
|     "DistributedStateDictOptions",
 | |
|     # torch.distributed.checkpoint.storage
 | |
|     "WriteResult",
 | |
|     # torch.distributed.collective_utils
 | |
|     "SyncPayload",
 | |
|     # torch.distributed.distributed_c10d
 | |
|     "AllToAllOptions",
 | |
|     "AllreduceCoalescedOptions",
 | |
|     "AllreduceOptions",
 | |
|     "Backend",
 | |
|     "BackendConfig",
 | |
|     "BarrierOptions",
 | |
|     "BroadcastOptions",
 | |
|     "DebugLevel",
 | |
|     "GatherOptions",
 | |
|     "GroupMember",
 | |
|     "ProcessGroup",
 | |
|     "ProcessGroupGloo",
 | |
|     "ProcessGroupNCCL",
 | |
|     "ReduceOptions",
 | |
|     "ReduceScatterOptions",
 | |
|     "ScatterOptions",
 | |
|     "Work",
 | |
|     "group",
 | |
|     # torch.distributed.elastic.agent.server.api
 | |
|     "ElasticAgent",
 | |
|     "RunResult",
 | |
|     "SimpleElasticAgent",
 | |
|     "WorkerSpec",
 | |
|     # torch.distributed.elastic.events.api
 | |
|     "Event",
 | |
|     "RdzvEvent",
 | |
|     # torch.distributed.elastic.metrics.api
 | |
|     "ConsoleMetricHandler",
 | |
|     "MetricData",
 | |
|     "MetricHandler",
 | |
|     "MetricStream",
 | |
|     "MetricsConfig",
 | |
|     "NullMetricHandler",
 | |
|     # torch.distributed.elastic.multiprocessing.api
 | |
|     "MultiprocessContext",
 | |
|     "PContext",
 | |
|     "RunProcsResult",
 | |
|     "SignalException",
 | |
|     "Std",
 | |
|     "SubprocessContext",
 | |
|     "SubprocessHandler",
 | |
|     # torch.distributed.elastic.multiprocessing.tail_log
 | |
|     "TailLog",
 | |
|     # torch.distributed.elastic.rendezvous.api
 | |
|     "RendezvousHandler",
 | |
|     "RendezvousHandlerRegistry",
 | |
|     "RendezvousParameters",
 | |
|     # torch.distributed.elastic.rendezvous.dynamic_rendezvous
 | |
|     "DynamicRendezvousHandler",
 | |
|     "RendezvousSettings",
 | |
|     # torch.distributed.elastic.rendezvous.etcd_rendezvous
 | |
|     "EtcdRendezvous",
 | |
|     "EtcdRendezvousHandler",
 | |
|     "EtcdRendezvousRetryImmediately",
 | |
|     "EtcdRendezvousRetryableFailure",
 | |
|     # torch.distributed.elastic.rendezvous.etcd_server
 | |
|     "EtcdServer",
 | |
|     # torch.distributed.elastic.rendezvous.static_tcp_rendezvous
 | |
|     "StaticTCPRendezvous",
 | |
|     # torch.distributed.elastic.timer.api
 | |
|     "RequestQueue",
 | |
|     "TimerClient",
 | |
|     "TimerServer",
 | |
|     # torch.distributed.elastic.timer.file_based_local_timer
 | |
|     "FileTimerClient",
 | |
|     "FileTimerRequest",
 | |
|     "FileTimerServer",
 | |
|     # torch.distributed.elastic.timer.local_timer
 | |
|     "LocalTimerClient",
 | |
|     "LocalTimerServer",
 | |
|     "MultiprocessingRequestQueue",
 | |
|     # torch.distributed.elastic.utils.api
 | |
|     "macros",
 | |
|     # torch.distributed.elastic.utils.data.cycling_iterator
 | |
|     "CyclingIterator",
 | |
|     # torch.distributed.elastic.utils.data.elastic_distributed_sampler
 | |
|     "ElasticDistributedSampler",
 | |
|     # torch.distributed.fsdp.api
 | |
|     "StateDictType",
 | |
|     # torch.distributed.fsdp.fully_sharded_data_parallel
 | |
|     "FullyShardedDataParallel",
 | |
|     "OptimStateKeyType",
 | |
|     # torch.distributed.fsdp.sharded_grad_scaler
 | |
|     "ShardedGradScaler",
 | |
|     # torch.distributed.fsdp.wrap
 | |
|     "CustomPolicy",
 | |
|     "ModuleWrapPolicy",
 | |
|     # torch.distributed.launcher.api
 | |
|     "LaunchConfig",
 | |
|     "elastic_launch",
 | |
|     # torch.distributed.optim.optimizer
 | |
|     "DistributedOptimizer",
 | |
|     # torch.distributed.optim.post_localSGD_optimizer
 | |
|     "PostLocalSGDOptimizer",
 | |
|     # torch.distributed.optim.zero_redundancy_optimizer
 | |
|     "ZeroRedundancyOptimizer",
 | |
|     # torch.distributed.rpc.api
 | |
|     "AllGatherStates",
 | |
|     "RRef",
 | |
|     # torch.distributed.rpc.backend_registry
 | |
|     "BackendValue",
 | |
|     # torch.distributed.rpc.internal
 | |
|     "PythonUDF",
 | |
|     "RPCExecMode",
 | |
|     "RemoteException",
 | |
|     # torch.distributed.rpc.rref_proxy
 | |
|     "RRefProxy",
 | |
|     # torch.distributed.tensor.parallel.fsdp
 | |
|     "DTensorExtensions",
 | |
|     # torch.distributed.tensor.parallel.style
 | |
|     "ParallelStyle",
 | |
|     # torch.distributions.logistic_normal
 | |
|     "LogisticNormal",
 | |
|     # torch.distributions.one_hot_categorical
 | |
|     "OneHotCategoricalStraightThrough",
 | |
|     # torch.distributions.relaxed_categorical
 | |
|     "ExpRelaxedCategorical",
 | |
|     # torch.distributions.utils
 | |
|     "lazy_property",
 | |
|     # torch.export.unflatten
 | |
|     "UnflattenedModule",
 | |
|     # torch.export.exported_program
 | |
|     "ConstantArgument",
 | |
|     "ExportedProgram",
 | |
|     # torch.fx.experimental.accelerator_partitioner
 | |
|     "DAG",
 | |
|     "DAGNode",
 | |
|     "PartitionResult",
 | |
|     "Partitioner",
 | |
|     # torch.fx.experimental.const_fold
 | |
|     "FoldedGraphModule",
 | |
|     # torch.fx.experimental.graph_gradual_typechecker
 | |
|     "Refine",
 | |
|     # torch.fx.experimental.meta_tracer
 | |
|     "MetaAttribute",
 | |
|     "MetaDeviceAttribute",
 | |
|     "MetaProxy",
 | |
|     "MetaTracer",
 | |
|     # torch.fx.experimental.migrate_gradual_types.constraint
 | |
|     "ApplyBroadcasting",
 | |
|     "BVar",
 | |
|     "BinConstraintD",
 | |
|     "BinConstraintT",
 | |
|     "BinaryConstraint",
 | |
|     "CalcConv",
 | |
|     "CalcMaxPool",
 | |
|     "CalcProduct",
 | |
|     "CanReshape",
 | |
|     "Conj",
 | |
|     "Constraint",
 | |
|     "DGreatestUpperBound",
 | |
|     "DVar",
 | |
|     "Disj",
 | |
|     "F",
 | |
|     "GetItem",
 | |
|     "GetItemTensor",
 | |
|     "IndexSelect",
 | |
|     "Prod",
 | |
|     "T",
 | |
|     "TGreatestUpperBound",
 | |
|     "TVar",
 | |
|     "Transpose",
 | |
|     # torch.fx.experimental.migrate_gradual_types.constraint_generator
 | |
|     "ConstraintGenerator",
 | |
|     # torch.fx.experimental.normalize
 | |
|     "NormalizeArgs",
 | |
|     "NormalizeOperators",
 | |
|     # torch.fx.experimental.optimization
 | |
|     "MklSubgraph",
 | |
|     "UnionFind",
 | |
|     # torch.fx.experimental.partitioner_utils
 | |
|     "Device",
 | |
|     "Partition",
 | |
|     "PartitionLatency",
 | |
|     "PartitionMode",
 | |
|     "PartitionerConfig",
 | |
|     # torch.fx.experimental.proxy_tensor
 | |
|     "DecompositionInterpreter",
 | |
|     "PreDispatchTorchFunctionMode",
 | |
|     "ProxySymDispatchMode",
 | |
|     "ProxyTorchDispatchMode",
 | |
|     "PythonKeyTracer",
 | |
|     # torch.fx.experimental.recording
 | |
|     "FakeTensorMeta",
 | |
|     "NotEqualError",
 | |
|     "ShapeEnvEvent",
 | |
|     # torch.fx.experimental.refinement_types
 | |
|     "Equality",
 | |
|     # torch.fx.experimental.rewriter
 | |
|     "AST_Rewriter",
 | |
|     "RewritingTracer",
 | |
|     # torch.fx.experimental.schema_type_annotation
 | |
|     "AnnotateTypesWithSchema",
 | |
|     # torch.fx.experimental.sym_node
 | |
|     "SymNode",
 | |
|     # torch.fx.experimental.symbolic_shapes
 | |
|     "Constraint",
 | |
|     "ConstraintViolationError",
 | |
|     "DynamicDimConstraintPrinter",
 | |
|     "GuardOnDataDependentSymNode",
 | |
|     "PendingUnbackedSymbolNotFound",
 | |
|     "LoggingShapeGuardPrinter",
 | |
|     "SymExprPrinter",
 | |
|     "RelaxedUnspecConstraint",
 | |
|     "RuntimeAssert",
 | |
|     "ShapeGuardPrinter",
 | |
|     "ShapeGuardPythonPrinter",
 | |
|     "SymDispatchMode",
 | |
|     "SymbolicContext",
 | |
|     # torch.fx.experimental.unification.match
 | |
|     "Dispatcher",
 | |
|     "VarDispatcher",
 | |
|     # torch.fx.experimental.unification.multipledispatch.conflict
 | |
|     "AmbiguityWarning",
 | |
|     # torch.fx.experimental.unification.multipledispatch.dispatcher
 | |
|     "Dispatcher",
 | |
|     "MDNotImplementedError",
 | |
|     "MethodDispatcher",
 | |
|     # torch.fx.experimental.unification.multipledispatch.variadic
 | |
|     "Variadic",
 | |
|     "VariadicSignatureMeta",
 | |
|     "VariadicSignatureType",
 | |
|     # torch.fx.experimental.unification.variable
 | |
|     "Var",
 | |
|     # torch.fx.experimental.validator
 | |
|     "BisectValidationException",
 | |
|     "PopulateValidator",
 | |
|     "SympyToZ3",
 | |
|     "ValidationException",
 | |
|     # torch.fx.graph
 | |
|     "PythonCode",
 | |
|     # torch.fx.immutable_collections
 | |
|     "immutable_dict",
 | |
|     "immutable_list",
 | |
|     # torch.fx.interpreter
 | |
|     "Interpreter",
 | |
|     # torch.fx.operator_schemas
 | |
|     "ArgsKwargsPair",
 | |
|     # torch.fx.passes.backends.cudagraphs
 | |
|     "CudaGraphsSupport",
 | |
|     # torch.fx.passes.dialect.common.cse_pass
 | |
|     "CSEPass",
 | |
|     # torch.fx.passes.fake_tensor_prop
 | |
|     "FakeTensorProp",
 | |
|     # torch.fx.passes.graph_drawer
 | |
|     "FxGraphDrawer",
 | |
|     # torch.fx.passes.graph_manipulation
 | |
|     "size_bytes",
 | |
|     # torch.fx.passes.infra.partitioner
 | |
|     "CapabilityBasedPartitioner",
 | |
|     "Partition",
 | |
|     # torch.fx.passes.infra.pass_base
 | |
|     "PassBase",
 | |
|     "PassResult",
 | |
|     # torch.fx.passes.infra.pass_manager
 | |
|     "PassManager",
 | |
|     # torch.fx.passes.net_min_base
 | |
|     "FxNetMinimizerBadModuleError",
 | |
|     "FxNetMinimizerResultMismatchError",
 | |
|     "FxNetMinimizerRunFuncError",
 | |
|     # torch.fx.passes.operator_support
 | |
|     "OpSupports",
 | |
|     "OperatorSupport",
 | |
|     "OperatorSupportBase",
 | |
|     # torch.fx.passes.pass_manager
 | |
|     "PassManager",
 | |
|     # torch.fx.passes.shape_prop
 | |
|     "ShapeProp",
 | |
|     # torch.fx.passes.split_module
 | |
|     "Partition",
 | |
|     # torch.fx.passes.split_utils
 | |
|     "Component",
 | |
|     # torch.fx.passes.splitter_base
 | |
|     "FxNetAccNodesFinder",
 | |
|     "FxNetSplitterInternalError",
 | |
|     "SplitResult",
 | |
|     "Subgraph",
 | |
|     # torch.fx.passes.tests.test_pass_manager
 | |
|     "TestPassManager",
 | |
|     # torch.fx.passes.tools_common
 | |
|     "FxNetAccFusionsFinder",
 | |
|     # torch.fx.passes.utils.common
 | |
|     "HolderModule",
 | |
|     # torch.fx.passes.utils.matcher_utils
 | |
|     "InternalMatch",
 | |
|     "SubgraphMatcher",
 | |
|     # torch.fx.passes.utils.source_matcher_utils
 | |
|     "SourcePartition",
 | |
|     # torch.fx.proxy
 | |
|     "Attribute",
 | |
|     "ParameterProxy",
 | |
|     "Proxy",
 | |
|     "Scope",
 | |
|     "ScopeContextManager",
 | |
|     "TraceError",
 | |
|     "TracerBase",
 | |
|     # torch.fx.subgraph_rewriter
 | |
|     "Match",
 | |
|     "ReplacedPatterns",
 | |
|     # torch.jit.annotations
 | |
|     "EvalEnv",
 | |
|     "Module",
 | |
|     # torch.jit.frontend
 | |
|     "Builder",
 | |
|     "ExprBuilder",
 | |
|     "FrontendError",
 | |
|     "FrontendTypeError",
 | |
|     "NotSupportedError",
 | |
|     "StmtBuilder",
 | |
|     "UnsupportedNodeError",
 | |
|     "WithItemBuilder",
 | |
|     # torch.masked.maskedtensor.core
 | |
|     "MaskedTensor",
 | |
|     # torch.multiprocessing.pool
 | |
|     "Pool",
 | |
|     # torch.multiprocessing.queue
 | |
|     "ConnectionWrapper",
 | |
|     "Queue",
 | |
|     "SimpleQueue",
 | |
|     # torch.multiprocessing.reductions
 | |
|     "SharedCache",
 | |
|     # torch.multiprocessing.spawn
 | |
|     "ProcessContext",
 | |
|     "ProcessException",
 | |
|     "ProcessExitedException",
 | |
|     "ProcessRaisedException",
 | |
|     "SpawnContext",
 | |
|     # torch.nn.cpp
 | |
|     "ModuleWrapper",
 | |
|     "OrderedDictWrapper",
 | |
|     # torch.nn.modules.container
 | |
|     "Container",  # deprecated
 | |
|     # torch.nn.modules.linear
 | |
|     "NonDynamicallyQuantizableLinear",
 | |
|     # torch.nn.modules.module
 | |
|     # TODO: causes multiple sphinx warnings
 | |
|     # WARNING: more than one target found for cross-reference 'Module'
 | |
|     "Module",
 | |
|     # torch.nn.modules.loss
 | |
|     "NLLLoss2d",  # deprecated
 | |
|     # torch.nn.modules.normalization
 | |
|     "CrossMapLRN2d",
 | |
|     # torch.nn.parallel.data_parallel
 | |
|     "DataParallel",
 | |
|     # torch.nn.parallel.distributed
 | |
|     "DistributedDataParallel",
 | |
|     # torch.nn.parameter
 | |
|     "UninitializedTensorMixin",
 | |
|     # torch.nn.utils.parametrize
 | |
|     "ParametrizationList",
 | |
|     # torch.nn.utils.prune
 | |
|     "CustomFromMask",
 | |
|     "Identity",
 | |
|     "L1Unstructured",
 | |
|     "RandomUnstructured",
 | |
|     # torch.nn.utils.rnn
 | |
|     "PackedSequence",
 | |
|     "PackedSequence_",
 | |
|     # torch.nn.utils.spectral_norm
 | |
|     "SpectralNorm",
 | |
|     "SpectralNormLoadStateDictPreHook",
 | |
|     "SpectralNormStateDictHook",
 | |
|     # torch.nn.utils.weight_norm
 | |
|     "WeightNorm",
 | |
|     # torch.onnx.errors
 | |
|     "OnnxExporterError",
 | |
|     "OnnxExporterWarning",
 | |
|     "SymbolicValueError",
 | |
|     "UnsupportedOperatorError",
 | |
|     # torch.onnx.verification
 | |
|     "OnnxBackend",
 | |
|     "OnnxTestCaseRepro",
 | |
|     # torch.optim.optimizer
 | |
|     "Optimizer",
 | |
|     # torch.overrides
 | |
|     "BaseTorchFunctionMode",
 | |
|     "TorchFunctionMode",
 | |
|     # torch.package.file_structure_representation
 | |
|     "Directory",
 | |
|     # torch.package.glob_group
 | |
|     "GlobGroup",
 | |
|     # torch.package.importer
 | |
|     "Importer",
 | |
|     "ObjMismatchError",
 | |
|     "ObjNotFoundError",
 | |
|     "OrderedImporter",
 | |
|     # torch.package.package_exporter
 | |
|     "PackageExporter",
 | |
|     "PackagingErrorReason",
 | |
|     # torch.package.package_importer
 | |
|     "PackageImporter",
 | |
|     # torch.profiler.profiler
 | |
|     "ExecutionTraceObserver",
 | |
|     "profile",
 | |
|     # torch.return_types
 | |
|     "aminmax",
 | |
|     "aminmax_out",
 | |
|     "cummax",
 | |
|     "cummax_out",
 | |
|     "cummin",
 | |
|     "cummin_out",
 | |
|     "frexp",
 | |
|     "frexp_out",
 | |
|     "geqrf",
 | |
|     "geqrf_out",
 | |
|     "histogram",
 | |
|     "histogram_out",
 | |
|     "histogramdd",
 | |
|     "kthvalue",
 | |
|     "kthvalue_out",
 | |
|     "linalg_cholesky_ex",
 | |
|     "linalg_cholesky_ex_out",
 | |
|     "linalg_eig",
 | |
|     "linalg_eig_out",
 | |
|     "linalg_eigh",
 | |
|     "linalg_eigh_out",
 | |
|     "linalg_inv_ex",
 | |
|     "linalg_inv_ex_out",
 | |
|     "linalg_ldl_factor",
 | |
|     "linalg_ldl_factor_ex",
 | |
|     "linalg_ldl_factor_ex_out",
 | |
|     "linalg_ldl_factor_out",
 | |
|     "linalg_lstsq",
 | |
|     "linalg_lstsq_out",
 | |
|     "linalg_lu",
 | |
|     "linalg_lu_factor",
 | |
|     "linalg_lu_factor_ex",
 | |
|     "linalg_lu_factor_ex_out",
 | |
|     "linalg_lu_factor_out",
 | |
|     "linalg_lu_out",
 | |
|     "linalg_qr",
 | |
|     "linalg_qr_out",
 | |
|     "linalg_slogdet",
 | |
|     "linalg_slogdet_out",
 | |
|     "linalg_solve_ex",
 | |
|     "linalg_solve_ex_out",
 | |
|     "linalg_svd",
 | |
|     "linalg_svd_out",
 | |
|     "lu_unpack",
 | |
|     "lu_unpack_out",
 | |
|     "max",
 | |
|     "max_out",
 | |
|     "median",
 | |
|     "median_out",
 | |
|     "min",
 | |
|     "min_out",
 | |
|     "mode",
 | |
|     "mode_out",
 | |
|     "nanmedian",
 | |
|     "nanmedian_out",
 | |
|     "qr",
 | |
|     "qr_out",
 | |
|     "slogdet",
 | |
|     "slogdet_out",
 | |
|     "sort",
 | |
|     "sort_out",
 | |
|     "svd",
 | |
|     "svd_out",
 | |
|     "topk",
 | |
|     "topk_out",
 | |
|     "triangular_solve",
 | |
|     "triangular_solve_out",
 | |
|     # torch.serialization
 | |
|     "LoadEndianness",
 | |
|     "SourceChangeWarning",
 | |
|     # torch.sparse.semi_structured
 | |
|     "SparseSemiStructuredTensor",
 | |
|     # torch.storage
 | |
|     "UntypedStorage",
 | |
|     # torch.torch_version
 | |
|     "TorchVersion",
 | |
|     # torch.types
 | |
|     "SymInt",
 | |
|     # torch.utils.benchmark.examples.compare
 | |
|     "FauxTorch",
 | |
|     # torch.utils.benchmark.examples.spectral_ops_fuzz_test
 | |
|     "Benchmark",
 | |
|     # torch.utils.benchmark.op_fuzzers.binary
 | |
|     "BinaryOpFuzzer",
 | |
|     # torch.utils.benchmark.op_fuzzers.sparse_binary
 | |
|     "BinaryOpSparseFuzzer",
 | |
|     # torch.utils.benchmark.op_fuzzers.sparse_unary
 | |
|     "UnaryOpSparseFuzzer",
 | |
|     # torch.utils.benchmark.op_fuzzers.spectral
 | |
|     "SpectralOpFuzzer",
 | |
|     # torch.utils.benchmark.op_fuzzers.unary
 | |
|     "UnaryOpFuzzer",
 | |
|     # torch.utils.benchmark.utils.common
 | |
|     "Measurement",
 | |
|     "TaskSpec",
 | |
|     # torch.utils.benchmark.utils.compare
 | |
|     "Colorize",
 | |
|     "Compare",
 | |
|     "Table",
 | |
|     # torch.utils.benchmark.utils.fuzzer
 | |
|     "FuzzedParameter",
 | |
|     "FuzzedTensor",
 | |
|     "Fuzzer",
 | |
|     "ParameterAlias",
 | |
|     # torch.utils.benchmark.utils.sparse_fuzzer
 | |
|     "FuzzedSparseTensor",
 | |
|     # torch.utils.benchmark.utils.timer
 | |
|     "CPPTimer",
 | |
|     "Language",
 | |
|     "Timer",
 | |
|     # torch.utils.benchmark.utils.valgrind_wrapper.timer_interface
 | |
|     "CallgrindStats",
 | |
|     "CopyIfCallgrind",
 | |
|     "FunctionCount",
 | |
|     "FunctionCounts",
 | |
|     "GlobalsBridge",
 | |
|     "Serialization",
 | |
|     # torch.utils.bundled_inputs
 | |
|     "InflatableArg",
 | |
|     # torch.utils.checkpoint
 | |
|     "CheckpointError",
 | |
|     "CheckpointFunction",
 | |
|     "DefaultDeviceType",
 | |
|     # torch.utils.collect_env
 | |
|     "SystemEnv",
 | |
|     # torch.utils.cpp_extension
 | |
|     "BuildExtension",
 | |
|     # torch.utils.data.dataloader
 | |
|     "DataLoader",
 | |
|     # torch.utils.data.datapipes.dataframe.dataframe_wrapper
 | |
|     "PandasWrapper",
 | |
|     "default_wrapper",
 | |
|     # torch.utils.data.datapipes.dataframe.dataframes
 | |
|     "Capture",
 | |
|     "CaptureA",
 | |
|     "CaptureAdd",
 | |
|     "CaptureCall",
 | |
|     "CaptureControl",
 | |
|     "CaptureDataFrame",
 | |
|     "CaptureDataFrameWithDataPipeOps",
 | |
|     "CaptureF",
 | |
|     "CaptureGetAttr",
 | |
|     "CaptureGetItem",
 | |
|     "CaptureInitial",
 | |
|     "CaptureLikeMock",
 | |
|     "CaptureMul",
 | |
|     "CaptureSetItem",
 | |
|     "CaptureSub",
 | |
|     "CaptureVariable",
 | |
|     "CaptureVariableAssign",
 | |
|     "DataFrameTracedOps",
 | |
|     "DataFrameTracer",
 | |
|     # torch.utils.data.datapipes.dataframe.datapipes
 | |
|     "ConcatDataFramesPipe",
 | |
|     "DataFramesAsTuplesPipe",
 | |
|     "ExampleAggregateAsDataFrames",
 | |
|     "FilterDataFramesPipe",
 | |
|     "PerRowDataFramesPipe",
 | |
|     "ShuffleDataFramesPipe",
 | |
|     # torch.utils.data.datapipes.dataframe.structures
 | |
|     "DataChunkDF",
 | |
|     # torch.utils.data.datapipes.datapipe
 | |
|     "DFIterDataPipe",
 | |
|     "DataChunk",
 | |
|     "IterDataPipe",
 | |
|     "MapDataPipe",
 | |
|     # torch.utils.data.datapipes.iter.callable
 | |
|     "CollatorIterDataPipe",
 | |
|     "MapperIterDataPipe",
 | |
|     # torch.utils.data.datapipes.iter.combinatorics
 | |
|     "SamplerIterDataPipe",
 | |
|     "ShufflerIterDataPipe",
 | |
|     # torch.utils.data.datapipes.iter.combining
 | |
|     "ConcaterIterDataPipe",
 | |
|     "DemultiplexerIterDataPipe",
 | |
|     "ForkerIterDataPipe",
 | |
|     "MultiplexerIterDataPipe",
 | |
|     "ZipperIterDataPipe",
 | |
|     # torch.utils.data.datapipes.iter.filelister
 | |
|     "FileListerIterDataPipe",
 | |
|     # torch.utils.data.datapipes.iter.fileopener
 | |
|     "FileOpenerIterDataPipe",
 | |
|     # torch.utils.data.datapipes.iter.grouping
 | |
|     "BatcherIterDataPipe",
 | |
|     "GrouperIterDataPipe",
 | |
|     "UnBatcherIterDataPipe",
 | |
|     # torch.utils.data.datapipes.iter.routeddecoder
 | |
|     "RoutedDecoderIterDataPipe",
 | |
|     # torch.utils.data.datapipes.iter.selecting
 | |
|     "FilterIterDataPipe",
 | |
|     # torch.utils.data.datapipes.iter.sharding
 | |
|     "SHARDING_PRIORITIES",
 | |
|     "ShardingFilterIterDataPipe",
 | |
|     # torch.utils.data.datapipes.iter.utils
 | |
|     "IterableWrapperIterDataPipe",
 | |
|     # torch.utils.data.datapipes.map.callable
 | |
|     "MapperMapDataPipe",
 | |
|     # torch.utils.data.datapipes.map.combinatorics
 | |
|     "ShufflerIterDataPipe",
 | |
|     # torch.utils.data.datapipes.map.combining
 | |
|     "ConcaterMapDataPipe",
 | |
|     "ZipperMapDataPipe",
 | |
|     # torch.utils.data.datapipes.map.grouping
 | |
|     "BatcherMapDataPipe",
 | |
|     # torch.utils.data.datapipes.map.utils
 | |
|     "SequenceWrapperMapDataPipe",
 | |
|     # torch.utils.data.datapipes.utils.decoder
 | |
|     "Decoder",
 | |
|     "ImageHandler",
 | |
|     "MatHandler",
 | |
|     # torch.utils.data.dataset
 | |
|     "ConcatDataset",
 | |
|     # torch.utils.data.distributed
 | |
|     "DistributedSampler",
 | |
|     # torch.utils.dlpack
 | |
|     "DLDeviceType",
 | |
|     # torch.utils.file_baton
 | |
|     "FileBaton",
 | |
|     # torch.utils.flop_counter
 | |
|     "FlopCounterMode",
 | |
|     # torch.utils.hipify.hipify_python
 | |
|     "CurrentState",
 | |
|     "GeneratedFileCleaner",
 | |
|     "HipifyResult",
 | |
|     "InputError",
 | |
|     "Trie",
 | |
|     "bcolors",
 | |
|     # torch.utils.hooks
 | |
|     "BackwardHook",
 | |
|     "RemovableHandle",
 | |
|     # torch.utils.mkldnn
 | |
|     "MkldnnBatchNorm",
 | |
|     "MkldnnConv1d",
 | |
|     "MkldnnConv2d",
 | |
|     "MkldnnConv3d",
 | |
|     "MkldnnLinear",
 | |
|     "MkldnnPrelu",
 | |
|     # torch.utils.mobile_optimizer
 | |
|     "LintCode",
 | |
|     # torch.utils.show_pickle
 | |
|     "DumpUnpickler",
 | |
|     "FakeClass",
 | |
|     "FakeObject",
 | |
|     # torch.utils.tensorboard.writer
 | |
|     "FileWriter",
 | |
|     "SummaryWriter",
 | |
|     # torch.utils.throughput_benchmark
 | |
|     "ExecutionStats",
 | |
|     # torch.utils.weak
 | |
|     "WeakIdKeyDictionary",
 | |
|     "WeakIdRef",
 | |
|     "WeakTensorKeyDictionary",
 | |
| ]
 | |
| 
 | |
| # The suffix(es) of source filenames.
 | |
| # You can specify multiple suffix as a list of string:
 | |
| #
 | |
| # source_suffix = ['.rst', '.md']
 | |
| source_suffix = ".rst"
 | |
| 
 | |
| # The master toctree document.
 | |
| master_doc = "index"
 | |
| 
 | |
| 
 | |
| # Use the linkcode extension to override [SOURCE] links to point
 | |
| # to the repo. Use the torch_version variable defined above to
 | |
| # determine link
 | |
| def linkcode_resolve(domain, info):
 | |
|     if domain != "py":
 | |
|         return None
 | |
|     if not info["module"]:
 | |
|         return None
 | |
| 
 | |
|     try:
 | |
|         module = __import__(info["module"], fromlist=[""])
 | |
|         obj = module
 | |
|         for part in info["fullname"].split("."):
 | |
|             obj = getattr(obj, part)
 | |
|         # Get the source file and line number
 | |
|         obj = inspect.unwrap(obj)
 | |
|         fn = inspect.getsourcefile(obj)
 | |
|         source, lineno = inspect.getsourcelines(obj)
 | |
|     except Exception:
 | |
|         return None
 | |
| 
 | |
|     # Determine the tag based on the torch_version
 | |
|     if RELEASE:
 | |
|         version_parts = torch_version.split(
 | |
|             "."
 | |
|         )  # For release versions, format as "vX.Y.Z" for correct path in repo
 | |
|         patch_version = (
 | |
|             version_parts[2].split("+")[0].split("a")[0]
 | |
|         )  # assuming a0 always comes after release version in versions.txt
 | |
|         version_path = f"v{version_parts[0]}.{version_parts[1]}.{patch_version}"
 | |
|     else:
 | |
|         version_path = torch.version.git_version
 | |
|     fn = os.path.relpath(fn, start=os.path.dirname(torch.__file__))
 | |
|     return (
 | |
|         f"https://github.com/pytorch/pytorch/blob/{version_path}/torch/{fn}#L{lineno}"
 | |
|     )
 | |
| 
 | |
| 
 | |
| # The language for content autogenerated by Sphinx. Refer to documentation
 | |
| # for a list of supported languages.
 | |
| #
 | |
| # This is also used if you do content translation via gettext catalogs.
 | |
| # Usually you set "language" from the command line for these cases.
 | |
| language = "en"
 | |
| 
 | |
| # List of patterns, relative to source directory, that match files and
 | |
| # directories to ignore when looking for source files.
 | |
| # This patterns also effect to html_static_path and html_extra_path
 | |
| exclude_patterns = []
 | |
| 
 | |
| # The name of the Pygments (syntax highlighting) style to use.
 | |
| pygments_style = "sphinx"
 | |
| 
 | |
| # If true, `todo` and `todoList` produce output, else they produce nothing.
 | |
| # Disable docstring inheritance
 | |
| autodoc_inherit_docstrings = False
 | |
| 
 | |
| # Show type hints in the description
 | |
| autodoc_typehints = "description"
 | |
| 
 | |
| # Add parameter types if the parameter is documented in the docstring
 | |
| autodoc_typehints_description_target = "documented_params"
 | |
| 
 | |
| # Type aliases for common types
 | |
| # Sphinx type aliases only works with Postponed Evaluation of Annotations
 | |
| # (PEP 563) enabled (via `from __future__ import annotations`), which keeps the
 | |
| # type annotations in string form instead of resolving them to actual types.
 | |
| # However, PEP 563 does not work well with JIT, which uses the type information
 | |
| # to generate the code. Therefore, the following dict does not have any effect
 | |
| # until PEP 563 is supported by JIT and enabled in files.
 | |
| autodoc_type_aliases = {
 | |
|     "_size_1_t": "int or tuple[int]",
 | |
|     "_size_2_t": "int or tuple[int, int]",
 | |
|     "_size_3_t": "int or tuple[int, int, int]",
 | |
|     "_size_4_t": "int or tuple[int, int, int, int]",
 | |
|     "_size_5_t": "int or tuple[int, int, int, int, int]",
 | |
|     "_size_6_t": "int or tuple[int, int, int, int, int, int]",
 | |
|     "_size_any_opt_t": "int or None or tuple",
 | |
|     "_size_2_opt_t": "int or None or 2-tuple",
 | |
|     "_size_3_opt_t": "int or None or 3-tuple",
 | |
|     "_ratio_2_t": "float or tuple[float, float]",
 | |
|     "_ratio_3_t": "float or tuple[float, float, float]",
 | |
|     "_ratio_any_t": "float or tuple",
 | |
|     "_tensor_list_t": "Tensor or tuple[Tensor]",
 | |
| }
 | |
| 
 | |
| # Enable overriding of function signatures in the first line of the docstring.
 | |
| autodoc_docstring_signature = True
 | |
| 
 | |
| # -- katex javascript in header
 | |
| #
 | |
| #    def setup(app):
 | |
| #    app.add_javascript("https://cdn.jsdelivr.net/npm/katex@0.10.0-beta/dist/katex.min.js")
 | |
| 
 | |
| 
 | |
| # -- Options for HTML output ----------------------------------------------
 | |
| #
 | |
| # The theme to use for HTML and HTML Help pages.  See the documentation for
 | |
| # a list of builtin themes.
 | |
| #
 | |
| #
 | |
| #
 | |
| 
 | |
| 
 | |
| # Theme options are theme-specific and customize the look and feel of a theme
 | |
| # further.  For a list of options available for each theme, see the
 | |
| # documentation.
 | |
| 
 | |
| # Add any paths that contain custom static files (such as style sheets) here,
 | |
| # relative to this directory. They are copied after the builtin static files,
 | |
| # so a file named "default.css" will overwrite the builtin "default.css".
 | |
| 
 | |
| html_css_files = [
 | |
|     "css/jit.css",
 | |
|     "css/custom.css",
 | |
|     "https://cdn.jsdelivr.net/npm/katex@0.10.0-beta/dist/katex.min.css",
 | |
| ]
 | |
| 
 | |
| html_js_files = ["js/runllm-widget.js"]
 | |
| 
 | |
| from sphinx.ext.coverage import CoverageBuilder
 | |
| 
 | |
| 
 | |
| # NB: Due to some duplications of the following modules/functions, we keep
 | |
| # them as expected failures for the time being instead of return 1
 | |
| ignore_duplicated_modules = {
 | |
|     "torch.nn.utils.weight_norm",
 | |
|     "torch.nn.utils.spectral_norm",
 | |
|     "torch.nn.parallel.data_parallel",
 | |
|     "torch.ao.quantization.quantize",
 | |
| }
 | |
| 
 | |
| 
 | |
| def coverage_post_process(app, exception):
 | |
|     if exception is not None:
 | |
|         return
 | |
| 
 | |
|     # Only run this test for the coverage build
 | |
|     if not isinstance(app.builder, CoverageBuilder):
 | |
|         return
 | |
| 
 | |
|     if not torch.distributed.is_available():
 | |
|         raise RuntimeError(
 | |
|             "The coverage tool cannot run with a version "
 | |
|             "of PyTorch that was built with USE_DISTRIBUTED=0 "
 | |
|             "as this module's API changes."
 | |
|         )
 | |
| 
 | |
|     # These are all the modules that have "automodule" in an rst file
 | |
|     # These modules are the ones for which coverage is checked
 | |
|     # Here, we make sure that no module is missing from that list
 | |
|     modules = app.env.domaindata["py"]["modules"]
 | |
| 
 | |
|     # We go through all the torch submodules and make sure they are
 | |
|     # properly tested
 | |
|     missing = set()
 | |
| 
 | |
|     def is_not_internal(modname):
 | |
|         split_name = modname.split(".")
 | |
|         for name in split_name:
 | |
|             if name[0] == "_":
 | |
|                 return False
 | |
|         return True
 | |
| 
 | |
|     # The walk function does not return the top module
 | |
|     if "torch" not in modules:
 | |
|         missing.add("torch")
 | |
| 
 | |
|     for _, modname, ispkg in pkgutil.walk_packages(
 | |
|         path=torch.__path__, prefix=torch.__name__ + "."
 | |
|     ):
 | |
|         if is_not_internal(modname):
 | |
|             if modname not in modules and modname not in ignore_duplicated_modules:
 | |
|                 missing.add(modname)
 | |
| 
 | |
|     output = []
 | |
| 
 | |
|     if missing:
 | |
|         mods = ", ".join(missing)
 | |
|         output.append(
 | |
|             f"\nYou added the following module(s) to the PyTorch namespace '{mods}' "
 | |
|             "but they have no corresponding entry in a doc .rst file. You should "
 | |
|             "either make sure that the .rst file that contains the module's documentation "
 | |
|             "properly contains either '.. automodule:: mod_name' (if you do not want "
 | |
|             "the paragraph added by the automodule, you can simply use '.. py:module:: mod_name') "
 | |
|             " or make the module private (by appending an '_' at the beginning of its name)."
 | |
|         )
 | |
| 
 | |
|     # The output file is hard-coded by the coverage tool
 | |
|     # Our CI is setup to fail if any line is added to this file
 | |
|     output_file = path.join(app.outdir, "python.txt")
 | |
| 
 | |
|     if output:
 | |
|         with open(output_file, "a") as f:
 | |
|             for o in output:
 | |
|                 f.write(o)
 | |
| 
 | |
| 
 | |
| def process_docstring(app, what_, name, obj, options, lines):
 | |
|     """
 | |
|     Custom process to transform docstring lines Remove "Ignore" blocks
 | |
| 
 | |
|     Args:
 | |
|         app (sphinx.application.Sphinx): the Sphinx application object
 | |
| 
 | |
|         what (str):
 | |
|             the type of the object which the docstring belongs to (one of
 | |
|             "module", "class", "exception", "function", "method", "attribute")
 | |
| 
 | |
|         name (str): the fully qualified name of the object
 | |
| 
 | |
|         obj: the object itself
 | |
| 
 | |
|         options: the options given to the directive: an object with
 | |
|             attributes inherited_members, undoc_members, show_inheritance
 | |
|             and noindex that are true if the flag option of same name was
 | |
|             given to the auto directive
 | |
| 
 | |
|         lines (List[str]): the lines of the docstring, see above
 | |
| 
 | |
|     References:
 | |
|         https://www.sphinx-doc.org/en/master/usage/extensions/autodoc.html
 | |
|     """
 | |
|     import re
 | |
| 
 | |
|     remove_directives = [
 | |
|         # Remove all xdoctest directives
 | |
|         re.compile(r"\s*>>>\s*#\s*x?doctest:\s*.*"),
 | |
|         re.compile(r"\s*>>>\s*#\s*x?doc:\s*.*"),
 | |
|     ]
 | |
|     filtered_lines = [
 | |
|         line for line in lines if not any(pat.match(line) for pat in remove_directives)
 | |
|     ]
 | |
|     # Modify the lines inplace
 | |
|     lines[:] = filtered_lines
 | |
| 
 | |
|     # make sure there is a blank line at the end
 | |
|     if lines and lines[-1].strip():
 | |
|         lines.append("")
 | |
| 
 | |
| 
 | |
| def setup(app):
 | |
|     app.connect("build-finished", coverage_post_process)
 | |
|     app.connect("autodoc-process-docstring", process_docstring)
 | |
|     app.connect("html-page-context", hide_edit_button_for_pages)
 | |
|     app.config.add_last_updated = True
 | |
|     return {"version": "0.1", "parallel_read_safe": True}
 | |
| 
 | |
| 
 | |
| def hide_edit_button_for_pages(app, pagename, templatename, context, doctree):
 | |
|     if pagename.startswith("generated/"):
 | |
|         context["theme_use_edit_page_button"] = False
 | |
| 
 | |
| 
 | |
| # From PyTorch 1.5, we now use autogenerated files to document classes and
 | |
| # functions. This breaks older references since
 | |
| # https://pytorch.org/docs/stable/torch.html#torch.flip
 | |
| # moved to
 | |
| # https://pytorch.org/docs/stable/generated/torch.flip.html
 | |
| # which breaks older links from blog posts, stack overflow answers and more.
 | |
| # To mitigate that, we add an id="torch.flip" in an appropriated place
 | |
| # in torch.html by overriding the visit_reference method of html writers.
 | |
| # Someday this can be removed, once the old links fade away
 | |
| 
 | |
| from sphinx.writers import html, html5
 | |
| 
 | |
| 
 | |
| def replace(Klass):
 | |
|     old_call = Klass.visit_reference
 | |
| 
 | |
|     def visit_reference(self, node):
 | |
|         if "refuri" in node and "generated" in node.get("refuri"):
 | |
|             ref = node.get("refuri")
 | |
|             ref_anchor = ref.split("#")
 | |
|             if len(ref_anchor) > 1:
 | |
|                 # Only add the id if the node href and the text match,
 | |
|                 # i.e. the href is "torch.flip#torch.flip" and the content is
 | |
|                 # "torch.flip" or "flip" since that is a signal the node refers
 | |
|                 # to autogenerated content
 | |
|                 anchor = ref_anchor[1]
 | |
|                 txt = node.parent.astext()
 | |
|                 if txt == anchor or txt == anchor.split(".")[-1]:
 | |
|                     self.body.append(f'<p id="{ref_anchor[1]}"/>')
 | |
|         return old_call(self, node)
 | |
| 
 | |
|     Klass.visit_reference = visit_reference
 | |
| 
 | |
| 
 | |
| replace(html.HTMLTranslator)
 | |
| replace(html5.HTML5Translator)
 | |
| 
 | |
| # -- Options for HTMLHelp output ------------------------------------------
 | |
| 
 | |
| # Output file base name for HTML help builder.
 | |
| htmlhelp_basename = "PyTorchdoc"
 | |
| 
 | |
| 
 | |
| # -- Options for LaTeX output ---------------------------------------------
 | |
| 
 | |
| latex_engine = "lualatex"
 | |
| latex_show_urls = "footnote"
 | |
| 
 | |
| latex_elements = {
 | |
|     "papersize": "letterpaper",
 | |
|     "pointsize": "10pt",
 | |
|     "tableofcontents": r"\pdfbookmark[0]{Contents}{toc}\tableofcontents",
 | |
|     "preamble": r"""
 | |
|        \usepackage{tocloft}
 | |
|        \setcounter{tocdepth}{3}
 | |
|        \setcounter{secnumdepth}{3}
 | |
|        % Fix table column widths
 | |
|        \renewenvironment{tabulary}{\begin{longtable}{p{0.3\linewidth}p{0.7\linewidth}}}{\end{longtable}}
 | |
| 
 | |
|        % Ensure tables don't overflow
 | |
|        \AtBeginEnvironment{tabular}{\sloppy}
 | |
|     """,
 | |
|     "fncychap": r"\usepackage[Bjornstrup]{fncychap}",
 | |
|     "extraclassoptions": "oneside",
 | |
| }
 | |
| 
 | |
| # Grouping the document tree into LaTeX files. List of tuples
 | |
| # (source start file, target name, title,
 | |
| #  author, documentclass [howto, manual, or own class]).
 | |
| 
 | |
| 
 | |
| latex_documents = [
 | |
|     (
 | |
|         master_doc,
 | |
|         "pytorch.tex",
 | |
|         "PyTorch Documentation",
 | |
|         "Torch Contributors",
 | |
|         "manual",
 | |
|     ),
 | |
| ]
 | |
| latex_use_xindy = False
 | |
| 
 | |
| 
 | |
| # -- Options for manual page output ---------------------------------------
 | |
| 
 | |
| # One entry per manual page. List of tuples
 | |
| # (source start file, name, description, authors, manual section).
 | |
| man_pages = [(master_doc, "PyTorch", "PyTorch Documentation", [author], 1)]
 | |
| 
 | |
| 
 | |
| # -- Options for Texinfo output -------------------------------------------
 | |
| 
 | |
| # Grouping the document tree into Texinfo files. List of tuples
 | |
| # (source start file, target name, title, author,
 | |
| #  dir menu entry, description, category)
 | |
| texinfo_documents = [
 | |
|     (
 | |
|         master_doc,
 | |
|         "PyTorch",
 | |
|         "PyTorch Documentation",
 | |
|         author,
 | |
|         "PyTorch",
 | |
|         "One line description of project.",
 | |
|         "Miscellaneous",
 | |
|     ),
 | |
| ]
 | |
| 
 | |
| 
 | |
| # Example configuration for intersphinx: refer to the Python standard library.
 | |
| intersphinx_mapping = {
 | |
|     "python": ("https://docs.python.org/3", None),
 | |
|     "numpy": ("https://numpy.org/doc/stable", None),
 | |
| }
 | |
| 
 | |
| import sphinx.ext.doctest
 | |
| 
 | |
| # -- A patch that prevents Sphinx from cross-referencing ivar tags -------
 | |
| # See http://stackoverflow.com/a/41184353/3343043
 | |
| from docutils import nodes
 | |
| from sphinx import addnodes
 | |
| from sphinx.util.docfields import TypedField
 | |
| 
 | |
| 
 | |
| # Without this, doctest adds any example with a `>>>` as a test
 | |
| doctest_test_doctest_blocks = ""
 | |
| doctest_default_flags = sphinx.ext.doctest.doctest.ELLIPSIS
 | |
| doctest_global_setup = """
 | |
| import torch
 | |
| try:
 | |
|     import torchvision
 | |
| except ImportError:
 | |
|     torchvision = None
 | |
| """
 | |
| 
 | |
| 
 | |
| def patched_make_field(self, types, domain, items, **kw):
 | |
|     # `kw` catches `env=None` needed for newer sphinx while maintaining
 | |
|     #  backwards compatibility when passed along further down!
 | |
| 
 | |
|     # type: (List, unicode, Tuple) -> nodes.field
 | |
|     def handle_item(fieldarg, content):
 | |
|         par = nodes.paragraph()
 | |
|         par += addnodes.literal_strong("", fieldarg)  # Patch: this line added
 | |
|         # par.extend(self.make_xrefs(self.rolename, domain, fieldarg,
 | |
|         #                           addnodes.literal_strong))
 | |
|         if fieldarg in types:
 | |
|             par += nodes.Text(" (")
 | |
|             # NOTE: using .pop() here to prevent a single type node to be
 | |
|             # inserted twice into the doctree, which leads to
 | |
|             # inconsistencies later when references are resolved
 | |
|             fieldtype = types.pop(fieldarg)
 | |
|             if len(fieldtype) == 1 and isinstance(fieldtype[0], nodes.Text):
 | |
|                 typename = fieldtype[0].astext()
 | |
|                 builtin_types = ["int", "long", "float", "bool", "type"]
 | |
|                 for builtin_type in builtin_types:
 | |
|                     pattern = rf"(?<![\w.]){builtin_type}(?![\w.])"
 | |
|                     repl = f"python:{builtin_type}"
 | |
|                     typename = re.sub(pattern, repl, typename)
 | |
|                 par.extend(
 | |
|                     self.make_xrefs(
 | |
|                         self.typerolename,
 | |
|                         domain,
 | |
|                         typename,
 | |
|                         addnodes.literal_emphasis,
 | |
|                         **kw,
 | |
|                     )
 | |
|                 )
 | |
|             else:
 | |
|                 par += fieldtype
 | |
|             par += nodes.Text(")")
 | |
|         par += nodes.Text(" -- ")
 | |
|         par += content
 | |
|         return par
 | |
| 
 | |
|     fieldname = nodes.field_name("", self.label)
 | |
|     if len(items) == 1 and self.can_collapse:
 | |
|         fieldarg, content = items[0]
 | |
|         bodynode = handle_item(fieldarg, content)
 | |
|     else:
 | |
|         bodynode = self.list_type()
 | |
|         for fieldarg, content in items:
 | |
|             bodynode += nodes.list_item("", handle_item(fieldarg, content))
 | |
|     fieldbody = nodes.field_body("", bodynode)
 | |
|     return nodes.field("", fieldname, fieldbody)
 | |
| 
 | |
| 
 | |
| TypedField.make_field = patched_make_field
 | |
| 
 | |
| copybutton_prompt_text = r">>> |\.\.\. "
 | |
| copybutton_prompt_is_regexp = True
 |