From 734281c3d6c960767581e14d88f16dd113a36f11 Mon Sep 17 00:00:00 2001
From: Alban Desmaison
Date: Thu, 10 Mar 2022 14:21:03 -0800
Subject: [PATCH] Cleanup all module references in doc (#73983)

Summary:
Working towards https://docs.google.com/document/d/10yx2-4gs0gTMOimVS403MnoAWkqitS8TUHX73PN8EjE/edit?pli=1#

This PR:
- Ensure that all the submodules are listed in an .rst file (so that they are considered by the coverage tool)
- Remove some long-deprecated code that just errors out on import
- Remove the allow list altogether so that nothing can be added back to it

Pull Request resolved: https://github.com/pytorch/pytorch/pull/73983
Reviewed By: anjali411
Differential Revision: D34787908
Pulled By: albanD
fbshipit-source-id: 163ce61e133b12b2f2e1cbe374f979e3d6858db7
(cherry picked from commit c9edfead7a01dc45bfc24eaf7220d2a84ab1f62e)
---
 docs/source/amp.rst | 5 +
 docs/source/backends.rst | 12 ++
 docs/source/benchmark_utils.rst | 7 ++
 docs/source/bottleneck.rst | 1 +
 docs/source/conf.py | 175 +++++++++------------------
 docs/source/data.rst | 12 ++
 docs/source/distributed.rst | 18 +++
 docs/source/fft.rst | 2 -
 docs/source/fx.rst | 9 ++
 docs/source/jit.rst | 4 +
 docs/source/nn.rst | 9 ++
 docs/source/package.rst | 3 +
 docs/source/quantization-support.rst | 24 ++++
 docs/source/quantization.rst | 22 ++++
 docs/source/sparse.rst | 2 +
 docs/source/special.rst | 2 -
 docs/source/tensorboard.rst | 1 +
 docs/source/torch.rst | 24 ++--
 test/test_utils.py | 6 -
 torch/for_onnx/__init__.py | 1 -
 torch/utils/ffi/__init__.py | 1 -
 21 files changed, 205 insertions(+), 135 deletions(-)
 delete mode 100644 torch/for_onnx/__init__.py
 delete mode 100644 torch/utils/ffi/__init__.py

diff --git a/docs/source/amp.rst b/docs/source/amp.rst
index 1f70f2c6982e..e0e2f2147aa7 100644
--- a/docs/source/amp.rst
+++ b/docs/source/amp.rst
@@ -4,6 +4,11 @@
 Automatic Mixed Precision package - torch.cuda.amp
 ==================================================
 
+.. Both modules below are missing doc entry. Adding them here for now.
+.. This does not add anything to the rendered page
+.. py:module:: torch.cpu
+.. py:module:: torch.cpu.amp
+
 .. automodule:: torch.cuda.amp
 .. currentmodule:: torch.cuda.amp
 
diff --git a/docs/source/backends.rst b/docs/source/backends.rst
index 45d6fdf2add2..2b49e4c93416 100644
--- a/docs/source/backends.rst
+++ b/docs/source/backends.rst
@@ -3,6 +3,7 @@
 torch.backends
 ==============
+.. automodule:: torch.backends
 
 `torch.backends` controls the behavior of various backends that PyTorch
 supports.
 
@@ -17,6 +18,7 @@ These backends include:
 
 torch.backends.cuda
 ^^^^^^^^^^^^^^^^^^^
+.. automodule:: torch.backends.cuda
 
 .. autofunction:: torch.backends.cuda.is_built
 
@@ -50,6 +52,7 @@ torch.backends.cuda
 
 torch.backends.cudnn
 ^^^^^^^^^^^^^^^^^^^^
+.. automodule:: torch.backends.cudnn
 
 .. autofunction:: torch.backends.cudnn.version
 
@@ -78,17 +81,26 @@ torch.backends.cudnn
 
 torch.backends.mkl
 ^^^^^^^^^^^^^^^^^^
+.. automodule:: torch.backends.mkl
 
 .. autofunction:: torch.backends.mkl.is_available
 
 
 torch.backends.mkldnn
 ^^^^^^^^^^^^^^^^^^^^^
+.. automodule:: torch.backends.mkldnn
 
 .. autofunction:: torch.backends.mkldnn.is_available
 
 
 torch.backends.openmp
 ^^^^^^^^^^^^^^^^^^^^^
+.. automodule:: torch.backends.openmp
 
 .. autofunction:: torch.backends.openmp.is_available
+
+.. Docs for other backends need to be added here.
+.. Automodules are just here to ensure checks run but they don't actually
+.. add anything to the rendered page for now.
+.. py:module:: torch.backends.quantized
+.. 
py:module:: torch.backends.xnnpack diff --git a/docs/source/benchmark_utils.rst b/docs/source/benchmark_utils.rst index c211dcb7b580..c93fbfd66c3d 100644 --- a/docs/source/benchmark_utils.rst +++ b/docs/source/benchmark_utils.rst @@ -18,3 +18,10 @@ Benchmark Utils - torch.utils.benchmark .. autoclass:: FunctionCounts :members: + +.. These are missing documentation. Adding them here until a better place +.. is made in this file. +.. py:module:: torch.utils.benchmark.examples +.. py:module:: torch.utils.benchmark.op_fuzzers +.. py:module:: torch.utils.benchmark.utils +.. py:module:: torch.utils.benchmark.utils.valgrind_wrapper diff --git a/docs/source/bottleneck.rst b/docs/source/bottleneck.rst index d6ce122234fb..3fa1c99b5061 100644 --- a/docs/source/bottleneck.rst +++ b/docs/source/bottleneck.rst @@ -1,6 +1,7 @@ torch.utils.bottleneck ====================== +.. automodule:: torch.utils.bottleneck .. currentmodule:: torch.utils.bottleneck `torch.utils.bottleneck` is a tool that can be used as an initial step for diff --git a/docs/source/conf.py b/docs/source/conf.py index c77612fa15de..d36deda65a19 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -86,6 +86,8 @@ templates_path = ['_templates'] # TODO: document these and remove them from here. coverage_ignore_functions = [ + # torch + "typename", # torch.autograd "register_py_tensor_class_for_device", "variable", @@ -129,9 +131,41 @@ coverage_ignore_functions = [ "execWrapper", # torch.onnx "unregister_custom_op_symbolic", + # torch.ao.quantization + "default_eval_fn", + # torch.ao.quantization.fx.backend_config + "validate_backend_config_dict", + # torch.backends + "disable_global_flags", + "flags_frozen", + # torch.distributed.algorithms.ddp_comm_hooks + "register_ddp_comm_hook", + # torch.nn + "factory_kwargs", + # torch.nn.parallel + "DistributedDataParallelCPU", + # torch.utils + "set_module", + # torch.utils.model_dump + "burn_in_info", + "get_info_and_burn_skeleton", + "get_inline_skeleton", + "get_model_info", + "get_storage_info", + "hierarchical_pickle", ] coverage_ignore_classes = [ + # torch + "FatalError", + "QUInt2x4Storage", + "Size", + "Storage", + "Stream", + "Tensor", + "finfo", + "iinfo", + "qscheme", # torch.cuda "BFloat16Storage", "BFloat16Tensor", @@ -197,109 +231,25 @@ coverage_ignore_classes = [ # torch.onnx "CheckerError", "ExportTypes", + # torch.backends + "ContextProp", + "PropModule", + # torch.backends.cuda + "cuBLASModule", + "cuFFTPlanCache", + "cuFFTPlanCacheAttrContextProp", + "cuFFTPlanCacheManager", + # torch.distributed.algorithms.ddp_comm_hooks + "DDPCommHookType", + # torch.jit.mobile + "LiteScriptModule", + # torch.nn.quantized.modules + "DeQuantize", + "Quantize", + # torch.utils.backcompat + "Warning", ] -# List of modules that do not have automodule/py:module in the doc yet -# We should NOT add anything to this list, see the CI failure message -# on how to solve missing automodule issues -coverage_missing_automodule = [ - "torch", - "torch.ao", - "torch.ao.nn", - "torch.ao.nn.sparse", - "torch.ao.nn.sparse.quantized", - "torch.ao.nn.sparse.quantized.dynamic", - "torch.ao.ns", - "torch.ao.ns.fx", - "torch.ao.quantization", - "torch.ao.quantization.fx", - "torch.ao.quantization.fx.backend_config", - "torch.ao.sparsity", - "torch.ao.sparsity.experimental", - "torch.ao.sparsity.experimental.pruner", - "torch.ao.sparsity.scheduler", - "torch.ao.sparsity.sparsifier", - "torch.backends", - "torch.backends.cuda", - "torch.backends.cudnn", - "torch.backends.mkl", - "torch.backends.mkldnn", - 
"torch.backends.openmp", - "torch.backends.quantized", - "torch.backends.xnnpack", - "torch.contrib", - "torch.cpu", - "torch.cpu.amp", - "torch.distributed.algorithms", - "torch.distributed.algorithms.ddp_comm_hooks", - "torch.distributed.algorithms.model_averaging", - "torch.distributed.elastic", - "torch.distributed.elastic.utils", - "torch.distributed.elastic.utils.data", - "torch.distributed.launcher", - "torch.distributed.nn", - "torch.distributed.nn.api", - "torch.distributed.nn.jit", - "torch.distributed.nn.jit.templates", - "torch.distributed.pipeline", - "torch.distributed.pipeline.sync", - "torch.distributed.pipeline.sync.skip", - "torch.fft", - "torch.for_onnx", - "torch.fx.experimental", - "torch.fx.experimental.unification", - "torch.fx.experimental.unification.multipledispatch", - "torch.fx.passes", - "torch.jit.mobile", - "torch.nn", - "torch.nn.backends", - "torch.nn.intrinsic", - "torch.nn.intrinsic.modules", - "torch.nn.intrinsic.qat", - "torch.nn.intrinsic.qat.modules", - "torch.nn.intrinsic.quantized", - "torch.nn.intrinsic.quantized.dynamic", - "torch.nn.intrinsic.quantized.dynamic.modules", - "torch.nn.intrinsic.quantized.modules", - "torch.nn.modules", - "torch.nn.parallel", - "torch.nn.qat", - "torch.nn.qat.modules", - "torch.nn.qat.dynamic", - "torch.nn.qat.dynamic.modules", - "torch.nn.quantizable", - "torch.nn.quantizable.modules", - "torch.nn.quantized", - "torch.nn.quantized.dynamic", - "torch.nn.quantized.dynamic.modules", - "torch.nn.quantized.modules", - "torch.nn.utils", - "torch.package", - "torch.package.analyze", - "torch.quantization", - "torch.quantization.fx", - "torch.sparse", - "torch.special", - "torch.utils", - "torch.utils.backcompat", - "torch.utils.benchmark.examples", - "torch.utils.benchmark.op_fuzzers", - "torch.utils.benchmark.utils", - "torch.utils.benchmark.utils.valgrind_wrapper", - "torch.utils.bottleneck", - "torch.utils.data.communication", - "torch.utils.data.datapipes", - "torch.utils.data.datapipes.dataframe", - "torch.utils.data.datapipes.iter", - "torch.utils.data.datapipes.map", - "torch.utils.data.datapipes.utils", - "torch.utils.ffi", - "torch.utils.hipify", - "torch.utils.model_dump", - "torch.utils.tensorboard", -] - - # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # @@ -417,6 +367,11 @@ def coverage_post_process(app, exception): if not isinstance(app.builder, CoverageBuilder): return + if not torch.distributed.is_available(): + raise RuntimeError("The coverage tool cannot run with a version " + "of PyTorch that was built with USE_DISTRIBUTED=0 " + "as this module's API changes.") + # These are all the modules that have "automodule" in an rst file # These modules are the ones for which coverage is checked # Here, we make sure that no module is missing from that list @@ -443,26 +398,16 @@ def coverage_post_process(app, exception): if modname not in modules: missing.add(modname) - expected = set(coverage_missing_automodule) - output = [] - unexpected_missing = missing - expected - if unexpected_missing: - mods = ", ".join(unexpected_missing) + if missing: + mods = ", ".join(missing) output.append(f"\nYou added the following module(s) to the PyTorch namespace '{mods}' " "but they have no corresponding entry in a doc .rst file. You should " "either make sure that the .rst file that contains the module's documentation " "properly contains either '.. 
automodule:: mod_name' (if you do not want " - "the paragraph added by the automodule, you can simply use py:module) or " - "make the module private (by appending an '_' at the beginning of its name.") - - unexpected_not_missing = expected - missing - if unexpected_not_missing: - mods = ", ".join(unexpected_not_missing) - output.append(f"\nThank you for adding the missing .rst entries for '{mods}', please update " - "the 'coverage_missing_automodule' in 'torch/docs/source/conf.py' to remove " - "the module(s) you fixed and make sure we do not regress on this in the future.") + "the paragraph added by the automodule, you can simply use '.. py:module:: mod_name') " + " or make the module private (by appending an '_' at the beginning of its name).") # The output file is hard-coded by the coverage tool # Our CI is setup to fail if any line is added to this file diff --git a/docs/source/data.rst b/docs/source/data.rst index 322de88e27d9..646f41436caf 100644 --- a/docs/source/data.rst +++ b/docs/source/data.rst @@ -432,3 +432,15 @@ Example:: .. autoclass:: torch.utils.data.WeightedRandomSampler .. autoclass:: torch.utils.data.BatchSampler .. autoclass:: torch.utils.data.distributed.DistributedSampler + + +.. This module is experimental and should be private, adding it here for now +.. py:module:: torch.utils.data.communication + +.. These modules are documented as part of torch/data listing them here for +.. now until we have a clearer fix +.. py:module:: torch.utils.data.datapipes +.. py:module:: torch.utils.data.datapipes.dataframe +.. py:module:: torch.utils.data.datapipes.iter +.. py:module:: torch.utils.data.datapipes.map +.. py:module:: torch.utils.data.datapipes.utils diff --git a/docs/source/distributed.rst b/docs/source/distributed.rst index 6c956c684222..790dd415544c 100644 --- a/docs/source/distributed.rst +++ b/docs/source/distributed.rst @@ -808,3 +808,21 @@ following matrix shows how the log level can be adjusted via the combination of +-------------------------+-----------------------------+------------------------+ | ``INFO`` | ``DETAIL`` | Trace (a.k.a. All) | +-------------------------+-----------------------------+------------------------+ + + +.. Distributed modules that are missing specific entries. +.. Adding them here for tracking purposes until they are more permanently fixed. +.. py:module:: torch.distributed.algorithms +.. py:module:: torch.distributed.algorithms.ddp_comm_hooks +.. py:module:: torch.distributed.algorithms.model_averaging +.. py:module:: torch.distributed.elastic +.. py:module:: torch.distributed.elastic.utils +.. py:module:: torch.distributed.elastic.utils.data +.. py:module:: torch.distributed.launcher +.. py:module:: torch.distributed.nn +.. py:module:: torch.distributed.nn.api +.. py:module:: torch.distributed.nn.jit +.. py:module:: torch.distributed.nn.jit.templates +.. py:module:: torch.distributed.pipeline +.. py:module:: torch.distributed.pipeline.sync +.. py:module:: torch.distributed.pipeline.sync.skip diff --git a/docs/source/fft.rst b/docs/source/fft.rst index 05f6215af513..5406b6610a60 100644 --- a/docs/source/fft.rst +++ b/docs/source/fft.rst @@ -7,8 +7,6 @@ torch.fft Discrete Fourier transforms and related functions. .. automodule:: torch.fft - :noindex: - .. currentmodule:: torch.fft Fast Fourier Transforms diff --git a/docs/source/fx.rst b/docs/source/fx.rst index 65689930743d..de1e1b88f93e 100644 --- a/docs/source/fx.rst +++ b/docs/source/fx.rst @@ -1109,3 +1109,12 @@ API Reference :members: .. 
autofunction:: torch.fx.replace_pattern + + +.. The experimental and passes submodules are missing docs. +.. Adding it here for coverage but this doesn't add anything to the +.. rendered doc. +.. py:module:: torch.fx.passes +.. py:module:: torch.fx.experimental +.. py:module:: torch.fx.experimental.unification +.. py:module:: torch.fx.experimental.unification.multipledispatch diff --git a/docs/source/jit.rst b/docs/source/jit.rst index 23426fb3d9ea..d2d55215aa3f 100644 --- a/docs/source/jit.rst +++ b/docs/source/jit.rst @@ -878,3 +878,7 @@ References jit_python_reference jit_unsupported + +.. This package is missing doc. Adding it here for coverage +.. This does not add anything to the rendered page. +.. py:module:: torch.jit.mobile diff --git a/docs/source/nn.rst b/docs/source/nn.rst index 6eca9d4b16b6..0e9d161c014b 100644 --- a/docs/source/nn.rst +++ b/docs/source/nn.rst @@ -3,6 +3,8 @@ torch.nn =================================== +.. automodule:: torch.nn +.. automodule:: torch.nn.modules These are the basic building blocks for graphs: @@ -331,6 +333,8 @@ Shuffle Layers DataParallel Layers (multi-GPU, distributed) -------------------------------------------- +.. automodule:: torch.nn.parallel +.. currentmodule:: torch .. autosummary:: :toctree: generated @@ -342,6 +346,7 @@ DataParallel Layers (multi-GPU, distributed) Utilities --------- +.. automodule:: torch.nn.utils From the ``torch.nn.utils`` module @@ -453,3 +458,7 @@ Lazy Modules Initialization :template: classtemplate.rst nn.modules.lazy.LazyModuleMixin + + +.. This module is kept only for backward compatibility +.. py:module:: torch.nn.backends diff --git a/docs/source/package.rst b/docs/source/package.rst index c7881f196140..b72112ffed31 100644 --- a/docs/source/package.rst +++ b/docs/source/package.rst @@ -1,3 +1,6 @@ +.. automodule:: torch.package +.. py:module:: torch.package.analyze + .. currentmodule:: torch.package torch.package diff --git a/docs/source/quantization-support.rst b/docs/source/quantization-support.rst index 78c5ea247c48..da6649a2fee3 100644 --- a/docs/source/quantization-support.rst +++ b/docs/source/quantization-support.rst @@ -217,6 +217,8 @@ to configure quantization settings for individual ops. torch.nn.intrinsic ~~~~~~~~~~~~~~~~~~ +.. automodule:: torch.nn.intrinsic +.. automodule:: torch.nn.intrinsic.modules This module implements the combined (fused) modules conv + relu which can then be quantized. @@ -243,6 +245,9 @@ then be quantized. torch.nn.intrinsic.qat ~~~~~~~~~~~~~~~~~~~~~~ +.. automodule:: torch.nn.intrinsic.qat +.. automodule:: torch.nn.intrinsic.qat.modules + This module implements the versions of those fused operations needed for quantization aware training. @@ -268,6 +273,9 @@ quantization aware training. torch.nn.intrinsic.quantized ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +.. automodule:: torch.nn.intrinsic.quantized +.. automodule:: torch.nn.intrinsic.quantized.modules + This module implements the quantized implementations of fused operations like conv + relu. No BatchNorm variants as it's usually folded into convolution @@ -289,6 +297,8 @@ for inference. torch.nn.intrinsic.quantized.dynamic ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +.. automodule:: torch.nn.intrinsic.quantized.dynamic +.. automodule:: torch.nn.intrinsic.quantized.dynamic.modules This module implements the quantized dynamic implementations of fused operations like linear + relu. @@ -304,6 +314,8 @@ like linear + relu. torch.nn.qat ~~~~~~~~~~~~~~~~~~~~~~ +.. automodule:: torch.nn.qat +.. 
automodule:: torch.nn.qat.modules This module implements versions of the key nn modules **Conv2d()** and **Linear()** which run in FP32 but with rounding applied to simulate the @@ -322,6 +334,8 @@ effect of INT8 quantization. torch.nn.qat.dynamic ~~~~~~~~~~~~~~~~~~~~~~~~~~ +.. automodule:: torch.nn.qat.dynamic +.. automodule:: torch.nn.qat.dynamic.modules This module implements versions of the key nn modules such as **Linear()** which run in FP32 but with rounding applied to simulate the effect of INT8 @@ -338,6 +352,8 @@ quantization and will be dynamically quantized during inference. torch.nn.quantized ~~~~~~~~~~~~~~~~~~~~~~ +.. automodule:: torch.nn.quantized +.. automodule:: torch.nn.quantized.modules This module implements the quantized versions of the nn layers such as ~`torch.nn.Conv2d` and `torch.nn.ReLU`. @@ -376,6 +392,7 @@ This module implements the quantized versions of the nn layers such as torch.nn.quantized.functional ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +.. automodule:: torch.nn.quantized.functional This module implements the quantized versions of the functional layers such as ~`torch.nn.functional.conv2d` and `torch.nn.functional.relu`. Note: @@ -413,6 +430,8 @@ This module implements the quantized versions of the functional layers such as torch.nn.quantized.dynamic ~~~~~~~~~~~~~~~~~~~~~~~~~~ +.. automodule:: torch.nn.quantized.dynamic +.. automodule:: torch.nn.quantized.dynamic.modules Dynamically quantized :class:`~torch.nn.Linear`, :class:`~torch.nn.LSTM`, :class:`~torch.nn.LSTMCell`, :class:`~torch.nn.GRUCell`, and @@ -492,3 +511,8 @@ the `custom operator mechanism `_ module. .. automodule:: torch.special - :noindex: - .. currentmodule:: torch.special Functions diff --git a/docs/source/tensorboard.rst b/docs/source/tensorboard.rst index d3205e3ba589..8cd138369288 100644 --- a/docs/source/tensorboard.rst +++ b/docs/source/tensorboard.rst @@ -1,5 +1,6 @@ torch.utils.tensorboard =================================== +.. automodule:: torch.utils.tensorboard Before going further, more details on TensorBoard can be found at https://www.tensorflow.org/tensorboard/ diff --git a/docs/source/torch.rst b/docs/source/torch.rst index e09675af82a1..e4062b6096f0 100644 --- a/docs/source/torch.rst +++ b/docs/source/torch.rst @@ -1,13 +1,6 @@ torch ===== -The torch package contains data structures for multi-dimensional -tensors and defines mathematical operations over these tensors. -Additionally, it provides many utilities for efficient serializing of -Tensors and arbitrary types, and other useful utilities. - -It has a CUDA counterpart, that enables you to run your tensor computations -on an NVIDIA GPU with compute capability >= 3.0 - +.. automodule:: torch .. currentmodule:: torch Tensors @@ -615,3 +608,18 @@ Utilities is_warn_always_enabled vmap _assert + + +.. Empty submodules added only for tracking. +.. py:module:: torch.contrib +.. py:module:: torch.utils.backcompat + +.. This submodule is split manually without a top level page. +.. py:module:: torch.utils + +.. This module is only used internally for ROCm builds. +.. py:module:: torch.utils.hipify + +.. This module needs to be documented. Adding here in the meantime +.. for tracking purposes +.. 
py:module:: torch.utils.model_dump diff --git a/test/test_utils.py b/test/test_utils.py index c8f4e3aa9453..f5018197249e 100644 --- a/test/test_utils.py +++ b/test/test_utils.py @@ -411,12 +411,6 @@ class TestDataLoaderUtils(TestCase): test_dir = os.path.abspath(os.path.dirname(str(__file__))) -class TestFFI(TestCase): - def test_deprecated(self): - with self.assertRaisesRegex(ImportError, "torch.utils.ffi is deprecated. Please use cpp extensions instead."): - from torch.utils.ffi import create_extension # type: ignore[attr-defined] # noqa: F401 - - @unittest.skipIf('SKIP_TEST_BOTTLENECK' in os.environ.keys(), 'SKIP_TEST_BOTTLENECK is set') class TestBottleneck(TestCase): def _run(self, command, timeout=30): diff --git a/torch/for_onnx/__init__.py b/torch/for_onnx/__init__.py deleted file mode 100644 index 30c8a0298045..000000000000 --- a/torch/for_onnx/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .onnx import * # noqa: F403 diff --git a/torch/utils/ffi/__init__.py b/torch/utils/ffi/__init__.py deleted file mode 100644 index e47a4f8a3417..000000000000 --- a/torch/utils/ffi/__init__.py +++ /dev/null @@ -1 +0,0 @@ -raise ImportError("torch.utils.ffi is deprecated. Please use cpp extensions instead.")
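
Usage note (illustrative only; ``torch.foo`` below is a placeholder, not a real module): with the allow list removed, any newly added public submodule must be registered in a doc .rst file or the coverage check above fails CI. The two accepted forms are

    .. automodule:: torch.foo

which renders the module's docstrings on the page, and

    .. py:module:: torch.foo

which only registers the module with Sphinx and adds nothing to the rendered output. A module that should not appear in the docs at all can instead be made private by prefixing its name with an underscore.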