Cleanup all module references in doc (#73983)
Summary:
Working towards https://docs.google.com/document/d/10yx2-4gs0gTMOimVS403MnoAWkqitS8TUHX73PN8EjE/edit?pli=1#

This PR:
- Ensures that all the submodules are listed in an rst file (so that they are considered by the coverage tool)
- Removes some long-deprecated code that just errors out on import
- Removes the allow list altogether to ensure nothing gets added back there

Pull Request resolved: https://github.com/pytorch/pytorch/pull/73983
Reviewed By: anjali411
Differential Revision: D34787908
Pulled By: albanD
fbshipit-source-id: 163ce61e133b12b2f2e1cbe374f979e3d6858db7
(cherry picked from commit c9edfead7a01dc45bfc24eaf7220d2a84ab1f62e)
Committed by: PyTorch MergeBot
Parent: 6656c71049
Commit: 734281c3d6
@@ -4,6 +4,11 @@
Automatic Mixed Precision package - torch.cuda.amp
==================================================

.. Both modules below are missing doc entry. Adding them here for now.
.. This does not add anything to the rendered page
.. py:module:: torch.cpu
.. py:module:: torch.cpu.amp

.. automodule:: torch.cuda.amp
.. currentmodule:: torch.cuda.amp
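
(Not part of the diff: a minimal mixed-precision training sketch with this package, assuming ``model``, ``optimizer``, ``loss_fn`` and an iterable ``data`` are already defined)::

    import torch

    scaler = torch.cuda.amp.GradScaler()
    for inp, target in data:
        optimizer.zero_grad()
        with torch.cuda.amp.autocast():        # run the forward pass in mixed precision
            loss = loss_fn(model(inp), target)
        scaler.scale(loss).backward()          # scale the loss to avoid fp16 underflow
        scaler.step(optimizer)
        scaler.update()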
@@ -3,6 +3,7 @@

torch.backends
==============
.. automodule:: torch.backends

`torch.backends` controls the behavior of various backends that PyTorch supports.

@@ -17,6 +18,7 @@ These backends include:

torch.backends.cuda
^^^^^^^^^^^^^^^^^^^
.. automodule:: torch.backends.cuda

.. autofunction:: torch.backends.cuda.is_built

@@ -50,6 +52,7 @@ torch.backends.cuda

torch.backends.cudnn
^^^^^^^^^^^^^^^^^^^^
.. automodule:: torch.backends.cudnn

.. autofunction:: torch.backends.cudnn.version

@@ -78,17 +81,26 @@ torch.backends.cudnn

torch.backends.mkl
^^^^^^^^^^^^^^^^^^
.. automodule:: torch.backends.mkl

.. autofunction:: torch.backends.mkl.is_available


torch.backends.mkldnn
^^^^^^^^^^^^^^^^^^^^^
.. automodule:: torch.backends.mkldnn

.. autofunction:: torch.backends.mkldnn.is_available


torch.backends.openmp
^^^^^^^^^^^^^^^^^^^^^
.. automodule:: torch.backends.openmp

.. autofunction:: torch.backends.openmp.is_available

.. Docs for other backends need to be added here.
.. Automodules are just here to ensure checks run but they don't actually
.. add anything to the rendered page for now.
.. py:module:: torch.backends.quantized
.. py:module:: torch.backends.xnnpack
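
(Not part of the diff: a quick sketch of the backend queries documented above)::

    import torch

    print(torch.backends.cuda.is_built())        # was PyTorch compiled with CUDA support?
    print(torch.backends.cudnn.version())        # cuDNN version number
    print(torch.backends.mkl.is_available())     # is MKL available at runtime?
    print(torch.backends.mkldnn.is_available())
    print(torch.backends.openmp.is_available())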
@@ -18,3 +18,10 @@ Benchmark Utils - torch.utils.benchmark

.. autoclass:: FunctionCounts
    :members:

.. These are missing documentation. Adding them here until a better place
.. is made in this file.
.. py:module:: torch.utils.benchmark.examples
.. py:module:: torch.utils.benchmark.op_fuzzers
.. py:module:: torch.utils.benchmark.utils
.. py:module:: torch.utils.benchmark.utils.valgrind_wrapper
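
(Not part of the diff: ``FunctionCounts`` objects typically come out of the callgrind path of ``Timer``; a hedged sketch of the surrounding API)::

    from torch.utils.benchmark import Timer

    t = Timer(stmt="x * 2", setup="import torch; x = torch.ones(1000)")
    print(t.timeit(100))             # wall-clock measurement
    # stats = t.collect_callgrind()  # requires valgrind; stats.stats() yields FunctionCounts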
@@ -1,6 +1,7 @@
torch.utils.bottleneck
======================

.. automodule:: torch.utils.bottleneck
.. currentmodule:: torch.utils.bottleneck

`torch.utils.bottleneck` is a tool that can be used as an initial step for
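
(Not part of the diff: the tool is typically invoked from the command line on an existing script, e.g. ``python -m torch.utils.bottleneck my_script.py [args]``, where ``my_script.py`` is a placeholder for your own entry point.)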
@@ -86,6 +86,8 @@ templates_path = ['_templates']
# TODO: document these and remove them from here.

coverage_ignore_functions = [
    # torch
    "typename",
    # torch.autograd
    "register_py_tensor_class_for_device",
    "variable",
@@ -129,9 +131,41 @@ coverage_ignore_functions = [
    "execWrapper",
    # torch.onnx
    "unregister_custom_op_symbolic",
    # torch.ao.quantization
    "default_eval_fn",
    # torch.ao.quantization.fx.backend_config
    "validate_backend_config_dict",
    # torch.backends
    "disable_global_flags",
    "flags_frozen",
    # torch.distributed.algorithms.ddp_comm_hooks
    "register_ddp_comm_hook",
    # torch.nn
    "factory_kwargs",
    # torch.nn.parallel
    "DistributedDataParallelCPU",
    # torch.utils
    "set_module",
    # torch.utils.model_dump
    "burn_in_info",
    "get_info_and_burn_skeleton",
    "get_inline_skeleton",
    "get_model_info",
    "get_storage_info",
    "hierarchical_pickle",
]

coverage_ignore_classes = [
    # torch
    "FatalError",
    "QUInt2x4Storage",
    "Size",
    "Storage",
    "Stream",
    "Tensor",
    "finfo",
    "iinfo",
    "qscheme",
    # torch.cuda
    "BFloat16Storage",
    "BFloat16Tensor",
@@ -197,109 +231,25 @@ coverage_ignore_classes = [
    # torch.onnx
    "CheckerError",
    "ExportTypes",
    # torch.backends
    "ContextProp",
    "PropModule",
    # torch.backends.cuda
    "cuBLASModule",
    "cuFFTPlanCache",
    "cuFFTPlanCacheAttrContextProp",
    "cuFFTPlanCacheManager",
    # torch.distributed.algorithms.ddp_comm_hooks
    "DDPCommHookType",
    # torch.jit.mobile
    "LiteScriptModule",
    # torch.nn.quantized.modules
    "DeQuantize",
    "Quantize",
    # torch.utils.backcompat
    "Warning",
]
# List of modules that do not have automodule/py:module in the doc yet
# We should NOT add anything to this list, see the CI failure message
# on how to solve missing automodule issues
coverage_missing_automodule = [
    "torch",
    "torch.ao",
    "torch.ao.nn",
    "torch.ao.nn.sparse",
    "torch.ao.nn.sparse.quantized",
    "torch.ao.nn.sparse.quantized.dynamic",
    "torch.ao.ns",
    "torch.ao.ns.fx",
    "torch.ao.quantization",
    "torch.ao.quantization.fx",
    "torch.ao.quantization.fx.backend_config",
    "torch.ao.sparsity",
    "torch.ao.sparsity.experimental",
    "torch.ao.sparsity.experimental.pruner",
    "torch.ao.sparsity.scheduler",
    "torch.ao.sparsity.sparsifier",
    "torch.backends",
    "torch.backends.cuda",
    "torch.backends.cudnn",
    "torch.backends.mkl",
    "torch.backends.mkldnn",
    "torch.backends.openmp",
    "torch.backends.quantized",
    "torch.backends.xnnpack",
    "torch.contrib",
    "torch.cpu",
    "torch.cpu.amp",
    "torch.distributed.algorithms",
    "torch.distributed.algorithms.ddp_comm_hooks",
    "torch.distributed.algorithms.model_averaging",
    "torch.distributed.elastic",
    "torch.distributed.elastic.utils",
    "torch.distributed.elastic.utils.data",
    "torch.distributed.launcher",
    "torch.distributed.nn",
    "torch.distributed.nn.api",
    "torch.distributed.nn.jit",
    "torch.distributed.nn.jit.templates",
    "torch.distributed.pipeline",
    "torch.distributed.pipeline.sync",
    "torch.distributed.pipeline.sync.skip",
    "torch.fft",
    "torch.for_onnx",
    "torch.fx.experimental",
    "torch.fx.experimental.unification",
    "torch.fx.experimental.unification.multipledispatch",
    "torch.fx.passes",
    "torch.jit.mobile",
    "torch.nn",
    "torch.nn.backends",
    "torch.nn.intrinsic",
    "torch.nn.intrinsic.modules",
    "torch.nn.intrinsic.qat",
    "torch.nn.intrinsic.qat.modules",
    "torch.nn.intrinsic.quantized",
    "torch.nn.intrinsic.quantized.dynamic",
    "torch.nn.intrinsic.quantized.dynamic.modules",
    "torch.nn.intrinsic.quantized.modules",
    "torch.nn.modules",
    "torch.nn.parallel",
    "torch.nn.qat",
    "torch.nn.qat.modules",
    "torch.nn.qat.dynamic",
    "torch.nn.qat.dynamic.modules",
    "torch.nn.quantizable",
    "torch.nn.quantizable.modules",
    "torch.nn.quantized",
    "torch.nn.quantized.dynamic",
    "torch.nn.quantized.dynamic.modules",
    "torch.nn.quantized.modules",
    "torch.nn.utils",
    "torch.package",
    "torch.package.analyze",
    "torch.quantization",
    "torch.quantization.fx",
    "torch.sparse",
    "torch.special",
    "torch.utils",
    "torch.utils.backcompat",
    "torch.utils.benchmark.examples",
    "torch.utils.benchmark.op_fuzzers",
    "torch.utils.benchmark.utils",
    "torch.utils.benchmark.utils.valgrind_wrapper",
    "torch.utils.bottleneck",
    "torch.utils.data.communication",
    "torch.utils.data.datapipes",
    "torch.utils.data.datapipes.dataframe",
    "torch.utils.data.datapipes.iter",
    "torch.utils.data.datapipes.map",
    "torch.utils.data.datapipes.utils",
    "torch.utils.ffi",
    "torch.utils.hipify",
    "torch.utils.model_dump",
    "torch.utils.tensorboard",
]


# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
@@ -417,6 +367,11 @@ def coverage_post_process(app, exception):
    if not isinstance(app.builder, CoverageBuilder):
        return

    if not torch.distributed.is_available():
        raise RuntimeError("The coverage tool cannot run with a version "
                           "of PyTorch that was built with USE_DISTRIBUTED=0 "
                           "as this module's API changes.")

    # These are all the modules that have "automodule" in an rst file
    # These modules are the ones for which coverage is checked
    # Here, we make sure that no module is missing from that list
@@ -443,26 +398,16 @@ def coverage_post_process(app, exception):
        if modname not in modules:
            missing.add(modname)

    expected = set(coverage_missing_automodule)

    output = []

    unexpected_missing = missing - expected
    if unexpected_missing:
        mods = ", ".join(unexpected_missing)
    if missing:
        mods = ", ".join(missing)
        output.append(f"\nYou added the following module(s) to the PyTorch namespace '{mods}' "
                      "but they have no corresponding entry in a doc .rst file. You should "
                      "either make sure that the .rst file that contains the module's documentation "
                      "properly contains either '.. automodule:: mod_name' (if you do not want "
                      "the paragraph added by the automodule, you can simply use py:module) or "
                      "make the module private (by appending an '_' at the beginning of its name.")

    unexpected_not_missing = expected - missing
    if unexpected_not_missing:
        mods = ", ".join(unexpected_not_missing)
        output.append(f"\nThank you for adding the missing .rst entries for '{mods}', please update "
                      "the 'coverage_missing_automodule' in 'torch/docs/source/conf.py' to remove "
                      "the module(s) you fixed and make sure we do not regress on this in the future.")
                      "the paragraph added by the automodule, you can simply use '.. py:module:: mod_name') "
                      " or make the module private (by appending an '_' at the beginning of its name).")

    # The output file is hard-coded by the coverage tool
    # Our CI is setup to fail if any line is added to this file
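
(Not part of the diff: for readers unfamiliar with how ``coverage_post_process`` gets called, it is wired up as a Sphinx event handler; a minimal sketch, assuming the usual ``setup(app)`` hook in ``conf.py``)::

    def setup(app):
        # run the doc-coverage check once the coverage builder has finished
        app.connect("build-finished", coverage_post_process)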
@@ -432,3 +432,15 @@ Example::
.. autoclass:: torch.utils.data.WeightedRandomSampler
.. autoclass:: torch.utils.data.BatchSampler
.. autoclass:: torch.utils.data.distributed.DistributedSampler


.. This module is experimental and should be private, adding it here for now
.. py:module:: torch.utils.data.communication

.. These modules are documented as part of torch/data listing them here for
.. now until we have a clearer fix
.. py:module:: torch.utils.data.datapipes
.. py:module:: torch.utils.data.datapipes.dataframe
.. py:module:: torch.utils.data.datapipes.iter
.. py:module:: torch.utils.data.datapipes.map
.. py:module:: torch.utils.data.datapipes.utils
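
(Not part of the diff: a hedged usage sketch for the samplers listed above, assuming an existing ``dataset`` and an initialized process group)::

    from torch.utils.data import DataLoader
    from torch.utils.data.distributed import DistributedSampler

    sampler = DistributedSampler(dataset)              # shards the dataset across ranks
    loader = DataLoader(dataset, batch_size=32, sampler=sampler)
    for epoch in range(2):
        sampler.set_epoch(epoch)                       # reshuffle differently each epoch
        for batch in loader:
            pass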
@@ -808,3 +808,21 @@ following matrix shows how the log level can be adjusted via the combination of
+-------------------------+-----------------------------+------------------------+
| ``INFO``                | ``DETAIL``                  | Trace (a.k.a. All)     |
+-------------------------+-----------------------------+------------------------+


.. Distributed modules that are missing specific entries.
.. Adding them here for tracking purposes until they are more permanently fixed.
.. py:module:: torch.distributed.algorithms
.. py:module:: torch.distributed.algorithms.ddp_comm_hooks
.. py:module:: torch.distributed.algorithms.model_averaging
.. py:module:: torch.distributed.elastic
.. py:module:: torch.distributed.elastic.utils
.. py:module:: torch.distributed.elastic.utils.data
.. py:module:: torch.distributed.launcher
.. py:module:: torch.distributed.nn
.. py:module:: torch.distributed.nn.api
.. py:module:: torch.distributed.nn.jit
.. py:module:: torch.distributed.nn.jit.templates
.. py:module:: torch.distributed.pipeline
.. py:module:: torch.distributed.pipeline.sync
.. py:module:: torch.distributed.pipeline.sync.skip
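
(Not part of the diff: a hedged illustration of the matrix row above; the environment variable names here are assumptions based on the surrounding logging docs)::

    import os

    # assumed names for the two knobs in the matrix above
    os.environ["TORCH_CPP_LOG_LEVEL"] = "INFO"
    os.environ["TORCH_DISTRIBUTED_DEBUG"] = "DETAIL"  # per the table, enables trace-level logging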
@@ -7,8 +7,6 @@ torch.fft
Discrete Fourier transforms and related functions.

.. automodule:: torch.fft
    :noindex:

.. currentmodule:: torch.fft

Fast Fourier Transforms
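
(Not part of the diff: a quick illustrative call)::

    import torch

    x = torch.randn(8)
    X = torch.fft.fft(x)        # complex spectrum of a 1-D signal
    x_back = torch.fft.ifft(X)  # round-trips back to (approximately) x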
@@ -1109,3 +1109,12 @@ API Reference
    :members:

.. autofunction:: torch.fx.replace_pattern


.. The experimental and passes submodules are missing docs.
.. Adding it here for coverage but this doesn't add anything to the
.. rendered doc.
.. py:module:: torch.fx.passes
.. py:module:: torch.fx.experimental
.. py:module:: torch.fx.experimental.unification
.. py:module:: torch.fx.experimental.unification.multipledispatch
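
(Not part of the diff: a small self-contained sketch of the ``torch.fx.replace_pattern`` function referenced above)::

    import torch
    import torch.fx

    class M(torch.nn.Module):
        def forward(self, a, b):
            return torch.add(a, b)

    def pattern(x, y):
        return torch.add(x, y)

    def replacement(x, y):
        return torch.mul(x, y)

    gm = torch.fx.symbolic_trace(M())
    torch.fx.replace_pattern(gm, pattern, replacement)  # rewrites add -> mul in the traced graph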
@@ -878,3 +878,7 @@ References

    jit_python_reference
    jit_unsupported

.. This package is missing doc. Adding it here for coverage
.. This does not add anything to the rendered page.
.. py:module:: torch.jit.mobile
@@ -3,6 +3,8 @@

torch.nn
===================================
.. automodule:: torch.nn
.. automodule:: torch.nn.modules

These are the basic building blocks for graphs:
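
(Not part of the diff: a minimal module built from these blocks, for orientation only)::

    import torch
    from torch import nn

    class TinyNet(nn.Module):
        def __init__(self):
            super().__init__()
            self.fc = nn.Linear(16, 4)

        def forward(self, x):
            return torch.relu(self.fc(x))

    out = TinyNet()(torch.randn(2, 16))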
@@ -331,6 +333,8 @@ Shuffle Layers

DataParallel Layers (multi-GPU, distributed)
--------------------------------------------
.. automodule:: torch.nn.parallel
.. currentmodule:: torch

.. autosummary::
    :toctree: generated
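
(Not part of the diff: these layers wrap an existing model; a sketch assuming more than one visible GPU)::

    import torch

    model = torch.nn.Linear(10, 10)
    if torch.cuda.device_count() > 1:
        model = torch.nn.DataParallel(model)   # replicates the module across GPUs
    if torch.cuda.is_available():
        model = model.cuda()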
@@ -342,6 +346,7 @@ DataParallel Layers (multi-GPU, distributed)

Utilities
---------
.. automodule:: torch.nn.utils

From the ``torch.nn.utils`` module
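
(Not part of the diff: a typical helper from this module, shown as a hedged sketch that assumes ``model``, ``optimizer`` and a computed ``loss``)::

    loss.backward()
    torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)  # cap the gradient norm
    optimizer.step()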
@@ -453,3 +458,7 @@ Lazy Modules Initialization
    :template: classtemplate.rst

    nn.modules.lazy.LazyModuleMixin


.. This module is kept only for backward compatibility
.. py:module:: torch.nn.backends
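
(Not part of the diff: lazy modules infer their parameter shapes from the first input, for example)::

    import torch

    layer = torch.nn.LazyLinear(out_features=4)   # in_features is inferred on first call
    y = layer(torch.randn(3, 7))                  # materializes a 7 -> 4 Linear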
@@ -1,3 +1,6 @@
.. automodule:: torch.package
.. py:module:: torch.package.analyze

.. currentmodule:: torch.package

torch.package
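
(Not part of the diff: a minimal round trip with this package; ``example_package.pt`` is just a placeholder path)::

    from torch.package import PackageExporter, PackageImporter

    with PackageExporter("example_package.pt") as exp:
        exp.save_pickle("my_resources", "data.pkl", {"answer": 42})

    imp = PackageImporter("example_package.pt")
    data = imp.load_pickle("my_resources", "data.pkl")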
@@ -217,6 +217,8 @@ to configure quantization settings for individual ops.

torch.nn.intrinsic
~~~~~~~~~~~~~~~~~~
.. automodule:: torch.nn.intrinsic
.. automodule:: torch.nn.intrinsic.modules

This module implements the combined (fused) modules conv + relu which can
then be quantized.
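
(Not part of the diff: fused modules such as ``ConvReLU2d`` are usually produced by ``torch.quantization.fuse_modules``; a small sketch)::

    import torch

    m = torch.nn.Sequential(torch.nn.Conv2d(3, 8, 3), torch.nn.ReLU()).eval()
    fused = torch.quantization.fuse_modules(m, [["0", "1"]])
    print(type(fused[0]))  # torch.nn.intrinsic.ConvReLU2d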
@@ -243,6 +245,9 @@ then be quantized.

torch.nn.intrinsic.qat
~~~~~~~~~~~~~~~~~~~~~~
.. automodule:: torch.nn.intrinsic.qat
.. automodule:: torch.nn.intrinsic.qat.modules


This module implements the versions of those fused operations needed for
quantization aware training.
@@ -268,6 +273,9 @@ quantization aware training.

torch.nn.intrinsic.quantized
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. automodule:: torch.nn.intrinsic.quantized
.. automodule:: torch.nn.intrinsic.quantized.modules


This module implements the quantized implementations of fused operations
like conv + relu. No BatchNorm variants as it's usually folded into convolution
@@ -289,6 +297,8 @@ for inference.

torch.nn.intrinsic.quantized.dynamic
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. automodule:: torch.nn.intrinsic.quantized.dynamic
.. automodule:: torch.nn.intrinsic.quantized.dynamic.modules

This module implements the quantized dynamic implementations of fused operations
like linear + relu.
@@ -304,6 +314,8 @@ like linear + relu.

torch.nn.qat
~~~~~~~~~~~~~~~~~~~~~~
.. automodule:: torch.nn.qat
.. automodule:: torch.nn.qat.modules

This module implements versions of the key nn modules **Conv2d()** and
**Linear()** which run in FP32 but with rounding applied to simulate the
|
||||
|
||||
torch.nn.qat.dynamic
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
.. automodule:: torch.nn.qat.dynamic
|
||||
.. automodule:: torch.nn.qat.dynamic.modules
|
||||
|
||||
This module implements versions of the key nn modules such as **Linear()**
|
||||
which run in FP32 but with rounding applied to simulate the effect of INT8
|
||||
@ -338,6 +352,8 @@ quantization and will be dynamically quantized during inference.
|
||||
|
||||
torch.nn.quantized
|
||||
~~~~~~~~~~~~~~~~~~~~~~
|
||||
.. automodule:: torch.nn.quantized
|
||||
.. automodule:: torch.nn.quantized.modules
|
||||
|
||||
This module implements the quantized versions of the nn layers such as
|
||||
~`torch.nn.Conv2d` and `torch.nn.ReLU`.
|
||||
@ -376,6 +392,7 @@ This module implements the quantized versions of the nn layers such as
|
||||
|
||||
torch.nn.quantized.functional
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
.. automodule:: torch.nn.quantized.functional
|
||||
|
||||
This module implements the quantized versions of the functional layers such as
|
||||
~`torch.nn.functional.conv2d` and `torch.nn.functional.relu`. Note:
|
||||
@ -413,6 +430,8 @@ This module implements the quantized versions of the functional layers such as
|
||||
|
||||
torch.nn.quantized.dynamic
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
.. automodule:: torch.nn.quantized.dynamic
|
||||
.. automodule:: torch.nn.quantized.dynamic.modules
|
||||
|
||||
Dynamically quantized :class:`~torch.nn.Linear`, :class:`~torch.nn.LSTM`,
|
||||
:class:`~torch.nn.LSTMCell`, :class:`~torch.nn.GRUCell`, and
|
||||
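
(Not part of the diff: dynamic quantization is usually applied through the one-liner below; a sketch)::

    import torch

    model = torch.nn.Sequential(torch.nn.Linear(8, 8), torch.nn.ReLU())
    dq = torch.quantization.quantize_dynamic(model, {torch.nn.Linear}, dtype=torch.qint8)
    # dq[0] is now a torch.nn.quantized.dynamic.Linear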
@@ -492,3 +511,8 @@ the `custom operator mechanism <https://pytorch.org/tutorials/advanced/torch_scr
* :attr:`torch.quint8` — 8-bit unsigned integer
* :attr:`torch.qint8` — 8-bit signed integer
* :attr:`torch.qint32` — 32-bit signed integer


.. These modules are missing docs. Adding them here only for tracking
.. automodule:: torch.nn.quantizable
.. automodule:: torch.nn.quantizable.modules
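
(Not part of the diff: these dtypes appear on quantized tensors, e.g.)::

    import torch

    x = torch.tensor([0.0, 0.5, 1.0])
    q = torch.quantize_per_tensor(x, scale=0.01, zero_point=0, dtype=torch.quint8)
    print(q.dtype)       # torch.quint8
    print(q.int_repr())  # underlying uint8 storage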
@@ -3,6 +3,9 @@
Quantization
============

.. automodule:: torch.quantization
.. automodule:: torch.quantization.fx

.. warning ::
    Quantization is in beta and subject to change.

@@ -883,3 +886,22 @@ Numerical Debugging (prototype)
  Eager mode numeric suite
* :ref:`torch_ao_ns_numeric_suite_fx`
  FX numeric suite


.. torch.ao is missing documentation. Since part of it is mentioned here, adding them here for now.
.. They are here for tracking purposes until they are more permanently fixed.
.. py:module:: torch.ao
.. py:module:: torch.ao.nn
.. py:module:: torch.ao.nn.sparse
.. py:module:: torch.ao.nn.sparse.quantized
.. py:module:: torch.ao.nn.sparse.quantized.dynamic
.. py:module:: torch.ao.ns
.. py:module:: torch.ao.ns.fx
.. py:module:: torch.ao.quantization
.. py:module:: torch.ao.quantization.fx
.. py:module:: torch.ao.quantization.fx.backend_config
.. py:module:: torch.ao.sparsity
.. py:module:: torch.ao.sparsity.experimental
.. py:module:: torch.ao.sparsity.experimental.pruner
.. py:module:: torch.ao.sparsity.scheduler
.. py:module:: torch.ao.sparsity.sparsifier
@@ -1,3 +1,5 @@
.. automodule:: torch.sparse

.. currentmodule:: torch

.. _sparse-docs:
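
(Not part of the diff: a minimal sparse COO tensor for reference)::

    import torch

    indices = torch.tensor([[0, 1, 1], [2, 0, 2]])
    values = torch.tensor([3.0, 4.0, 5.0])
    s = torch.sparse_coo_tensor(indices, values, size=(2, 3))
    print(s.to_dense())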
@@ -7,8 +7,6 @@ torch.special
The torch.special module, modeled after SciPy's `special <https://docs.scipy.org/doc/scipy/reference/special.html>`_ module.

.. automodule:: torch.special
    :noindex:

.. currentmodule:: torch.special

Functions
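
(Not part of the diff: a couple of the functions in this module, for illustration)::

    import torch

    x = torch.tensor([0.0, 0.5, 1.0])
    print(torch.special.erf(x))    # Gauss error function
    print(torch.special.expit(x))  # logistic sigmoid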
@@ -1,5 +1,6 @@
torch.utils.tensorboard
===================================
.. automodule:: torch.utils.tensorboard

Before going further, more details on TensorBoard can be found at
https://www.tensorflow.org/tensorboard/
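
(Not part of the diff: the usual entry point is ``SummaryWriter``; a sketch where the log directory is a placeholder)::

    from torch.utils.tensorboard import SummaryWriter

    writer = SummaryWriter(log_dir="runs/example")
    for step in range(10):
        writer.add_scalar("loss", 1.0 / (step + 1), step)
    writer.close()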
@@ -1,13 +1,6 @@
torch
=====
The torch package contains data structures for multi-dimensional
tensors and defines mathematical operations over these tensors.
Additionally, it provides many utilities for efficient serializing of
Tensors and arbitrary types, and other useful utilities.

It has a CUDA counterpart, that enables you to run your tensor computations
on an NVIDIA GPU with compute capability >= 3.0

.. automodule:: torch
.. currentmodule:: torch

Tensors
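
(Not part of the diff: the CPU/CUDA counterpart mentioned above in one line, assuming a CUDA device is present)::

    import torch

    x = torch.randn(2, 3)
    y = x.cuda() if torch.cuda.is_available() else x   # the same ops run on GPU tensors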
@@ -615,3 +608,18 @@ Utilities
    is_warn_always_enabled
    vmap
    _assert


.. Empty submodules added only for tracking.
.. py:module:: torch.contrib
.. py:module:: torch.utils.backcompat

.. This submodule is split manually without a top level page.
.. py:module:: torch.utils

.. This module is only used internally for ROCm builds.
.. py:module:: torch.utils.hipify

.. This module needs to be documented. Adding here in the meantime
.. for tracking purposes
.. py:module:: torch.utils.model_dump
@@ -411,12 +411,6 @@ class TestDataLoaderUtils(TestCase):
    test_dir = os.path.abspath(os.path.dirname(str(__file__)))


class TestFFI(TestCase):
    def test_deprecated(self):
        with self.assertRaisesRegex(ImportError, "torch.utils.ffi is deprecated. Please use cpp extensions instead."):
            from torch.utils.ffi import create_extension  # type: ignore[attr-defined] # noqa: F401


@unittest.skipIf('SKIP_TEST_BOTTLENECK' in os.environ.keys(), 'SKIP_TEST_BOTTLENECK is set')
class TestBottleneck(TestCase):
    def _run(self, command, timeout=30):
@@ -1 +0,0 @@
from .onnx import *  # noqa: F403
@@ -1 +0,0 @@
raise ImportError("torch.utils.ffi is deprecated. Please use cpp extensions instead.")