Compare commits

...

17 Commits

10 changed files with 100 additions and 25 deletions

View File

@@ -1,23 +1,24 @@
sphinx==5.3.0 sphinx==6.2.1
#Description: This is used to generate PyTorch docs #Description: This is used to generate PyTorch docs
#Pinned versions: 5.3.0 #Pinned versions: 6.2.1
-e git+https://github.com/pytorch/pytorch_sphinx_theme.git@1657ad2fc1acdc98aa719eebecbb0128a7c13ce4#egg=pytorch_sphinx_theme2 -e git+https://github.com/pytorch/pytorch_sphinx_theme.git@1657ad2fc1acdc98aa719eebecbb0128a7c13ce4#egg=pytorch_sphinx_theme2
sphinx-remove-toctrees==1.0.0.post1
#Description: This is used to generate PyTorch docs
#Pinned versions: 1.0.0.post1
# TODO: sphinxcontrib.katex 0.9.0 adds a local KaTeX server to speed up pre-rendering # TODO: sphinxcontrib.katex 0.9.0 adds a local KaTeX server to speed up pre-rendering
# but it doesn't seem to work and hangs around idly. The initial thought is that it is # but it doesn't seem to work and hangs around idly. The initial thought is that it is
# probably something related to the Docker setup. We can investigate this later. # probably something related to the Docker setup. We can investigate this later.
sphinxcontrib.katex==0.8.6 sphinxcontrib.katex==0.9.10
#Description: This is used to generate PyTorch docs #Description: This is used to generate PyTorch docs
#Pinned versions: 0.8.6 #Pinned versions: 0.9.10
sphinxext-opengraph==0.9.1
#Description: This is used to generate PyTorch docs
#Pinned versions: 0.9.1
sphinx_sitemap==2.6.0 sphinx_sitemap==2.7.1
#Description: This is used to generate sitemap for PyTorch docs #Description: This is used to generate sitemap for PyTorch docs
#Pinned versions: 2.6.0 #Pinned versions: 2.7.1
matplotlib==3.5.3 ; python_version < "3.13" matplotlib==3.5.3 ; python_version < "3.13"
matplotlib==3.6.3 ; python_version >= "3.13" matplotlib==3.6.3 ; python_version >= "3.13"
@@ -29,17 +30,17 @@ tensorboard==2.18.0 ; python_version >= "3.13"
#Description: This is used to generate PyTorch docs #Description: This is used to generate PyTorch docs
#Pinned versions: 2.13.0 #Pinned versions: 2.13.0
breathe==4.34.0 breathe==4.35.0
#Description: This is used to generate PyTorch C++ docs #Description: This is used to generate PyTorch C++ docs
#Pinned versions: 4.34.0 #Pinned versions: 4.35.0
exhale==0.2.3 exhale==0.3.7
#Description: This is used to generate PyTorch C++ docs #Description: This is used to generate PyTorch C++ docs
#Pinned versions: 0.2.3 #Pinned versions: 0.3.7
docutils==0.16 docutils==0.18.1
#Description: This is used to generate PyTorch C++ docs #Description: This is used to generate PyTorch C++ docs
#Pinned versions: 0.16 #Pinned versions: 0.18.1
bs4==0.0.1 bs4==0.0.1
#Description: This is used to generate PyTorch C++ docs #Description: This is used to generate PyTorch C++ docs
@@ -49,13 +50,24 @@ IPython==8.12.0
#Description: This is used to generate PyTorch functorch docs #Description: This is used to generate PyTorch functorch docs
#Pinned versions: 8.12.0 #Pinned versions: 8.12.0
myst-nb==0.17.2 myst-nb==1.2.0
#Description: This is used to generate PyTorch functorch and torch.compile docs. #Description: This is used to generate PyTorch functorch and torch.compile docs.
#Pinned versions: 0.17.2 #Pinned versions: 1.2.0
# The following are required to build torch.distributed.elastic.rendezvous.etcd* docs # The following are required to build torch.distributed.elastic.rendezvous.etcd* docs
python-etcd==0.4.5 python-etcd==0.4.5
sphinx-copybutton==0.5.0 sphinx-copybutton==0.5.2
sphinx-design==0.4.0 sphinx-design==0.6.1
sphinxcontrib-mermaid==1.0.0 sphinxcontrib-mermaid==1.0.0
myst-parser==0.18.1 myst-parser==3.0.1
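As an aside on keeping the pins and their `#Pinned versions:` comments in sync: a small standard-library check can catch drift. This is a hypothetical helper (the `pin_check.py` name and the pin subset, taken from the new column of this diff, are illustrative), not part of the PR:

```python
# pin_check.py -- hypothetical helper, not part of this PR: report whether
# the installed packages match the pins shown in the new column above.
from importlib.metadata import PackageNotFoundError, version

PINS = {
    "sphinx": "6.2.1",
    "breathe": "4.35.0",
    "exhale": "0.3.7",
    "docutils": "0.18.1",
    "myst-nb": "1.2.0",
}

for pkg, pinned in PINS.items():
    try:
        installed = version(pkg)
    except PackageNotFoundError:
        print(f"{pkg}: not installed (pinned {pinned})")
        continue
    status = "ok" if installed == pinned else f"MISMATCH (pinned {pinned})"
    print(f"{pkg} {installed}: {status}")
```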

View File

@@ -83,6 +83,10 @@ rm -rf pytorch || true
pushd "$pt_checkout" pushd "$pt_checkout"
pushd docs pushd docs
# Profile the docs build to see what is taking the longest
python -m cProfile -o docs_build.prof -m sphinx.cmd.build -b html -d build/doctrees source build/html
python -c "import pstats; p = pstats.Stats('docs_build.prof'); p.sort_stats('cumtime').print_stats(50)"
# Build the docs # Build the docs
if [ "$is_main_doc" = true ]; then if [ "$is_main_doc" = true ]; then
build_docs html || exit $? build_docs html || exit $?
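The two profiling lines added above write a cProfile dump and print the top 50 entries by cumulative time. For interactive digging afterwards, the same `docs_build.prof` can be reloaded with the standard-library `pstats` module; this sketch assumes the dump exists in the current directory:

```python
# profile_report.py -- a sketch for exploring docs_build.prof after a build.
import pstats

stats = pstats.Stats("docs_build.prof")
stats.strip_dirs()            # shorten absolute paths in the report
stats.sort_stats("cumtime")   # slowest call chains first
stats.print_stats(50)         # same top-50 view the build script prints

# Restrict output to frames whose path matches "sphinx" to see where the
# builder itself spends its time:
stats.print_stats("sphinx")
```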

View File

@@ -74,7 +74,11 @@ jobs:
- docs_type: python - docs_type: python
runner: ${{ inputs.runner_prefix }}linux.2xlarge runner: ${{ inputs.runner_prefix }}linux.2xlarge
# It takes less than 30m to finish python docs unless there are issues # It takes less than 30m to finish python docs unless there are issues
timeout-minutes: 30 timeout-minutes: 60
- docs_type: functorch
runner: ${{ inputs.runner_prefix }}linux.2xlarge
# It takes less than 15m to finish functorch docs unless there are issues
timeout-minutes: 15
# Set a fixed name for this job instead of using the current matrix-generated name, i.e. build-docs (cpp, linux.12xlarge, 180) # Set a fixed name for this job instead of using the current matrix-generated name, i.e. build-docs (cpp, linux.12xlarge, 180)
# The current name requires updating the database last docs push query from test-infra every time the matrix is updated # The current name requires updating the database last docs push query from test-infra every time the matrix is updated
name: build-docs-${{ matrix.docs_type }}-${{ inputs.push }} name: build-docs-${{ matrix.docs_type }}-${{ inputs.push }}

View File

@@ -0,0 +1,7 @@
.. role:: hidden
    :class: hidden-section
.. currentmodule:: {{ module }}

{{ name | underline }}

.. autofunction:: {{ fullname }}

View File

@@ -0,0 +1,7 @@
.. role:: hidden
    :class: hidden-section
.. currentmodule:: {{ module }}

{{ name | underline }}

.. automethod:: {{ fullname }}

View File

@@ -66,6 +66,7 @@ extensions = [
"sphinx.ext.linkcode", "sphinx.ext.linkcode",
"sphinxcontrib.mermaid", "sphinxcontrib.mermaid",
"sphinx_sitemap", "sphinx_sitemap",
"sphinx_remove_toctrees"
] ]
myst_enable_extensions = [ myst_enable_extensions = [
@@ -74,6 +75,9 @@ myst_enable_extensions = [
"html_image", "html_image",
] ]
# Remove the "generated" tag from the toctree to allow for faster builds
remove_from_toctrees = ["generated/*"]
html_baseurl = "https://docs.pytorch.org/docs/stable/" # needed for sphinx-sitemap html_baseurl = "https://docs.pytorch.org/docs/stable/" # needed for sphinx-sitemap
sitemap_locales = [None] sitemap_locales = [None]
sitemap_excludes = [ sitemap_excludes = [
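For readers unfamiliar with `sphinx-remove-toctrees` (added to the extensions list and configured with `remove_from_toctrees` above): it prunes matching pages from the navigation tree so Sphinx no longer resolves every autosummary-generated stub on each page render. A minimal standalone `conf.py` sketch, assuming a demo project rather than PyTorch's real configuration:

```python
# Minimal conf.py sketch (assumed demo project, not PyTorch's real config).
project = "demo"
extensions = [
    "sphinx.ext.autosummary",
    "sphinx_remove_toctrees",
]
autosummary_generate = True
# Same glob the PR uses: drop autosummary output from rendered toctrees,
# which is what makes large API docs build noticeably faster.
remove_from_toctrees = ["generated/*"]
```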
@@ -93,8 +97,10 @@ numpydoc_show_class_members = False
# autosectionlabel throws warnings if section names are duplicated. # autosectionlabel throws warnings if section names are duplicated.
# The following tells autosectionlabel to not throw a warning for # The following tells autosectionlabel to not throw a warning for
# duplicated section names that are in different documents. # duplicated section names that are in different documents.
autosectionlabel_prefix_document = True autosectionlabel_prefix_document = True
# katex options # katex options
# #
# #
@@ -207,6 +213,41 @@ templates_path = [
] ]
# TODO: document these and remove them from here. # TODO: document these and remove them from here.
autosummary_filename_map = {
'torch.nn.utils.prune.identity': 'torch.nn.utils.prune.identity_function',
'torch.nn.utils.prune.Identity': 'torch.nn.utils.prune.Identity_class',
'torch.optim.adamw.adamw': 'torch.optim.adamw.adamw_function',
'torch.optim.adamw.AdamW': 'torch.optim.adamw.AdamW_class',
'torch.optim.asgd.asgd': 'torch.optim.asgd.asgd_function',
'torch.optim.asgd.ASGD': 'torch.optim.asgd.ASGD_class',
'torch.optim.nadam.nadam': 'torch.optim.nadam.nadam_function',
'torch.optim.nadam.NAdam': 'torch.optim.nadam.NAdam_class',
'torch.optim.radam.radam': 'torch.optim.radam.radam_function',
'torch.optim.radam.RAdam': 'torch.optim.radam.RAdam_class',
'torch.optim.rmsprop.rmsprop': 'torch.optim.rmsprop.rmsprop_function',
'torch.optim.rmsprop.RMSprop': 'torch.optim.rmsprop.RMSprop_class',
'torch.optim.rprop.rprop': 'torch.optim.rprop.rprop_function',
'torch.optim.rprop.Rprop': 'torch.optim.rprop.Rprop_class',
'torch.optim.sgd.sgd': 'torch.optim.sgd.sgd_function',
'torch.optim.sgd.SGD': 'torch.optim.sgd.SGD_class',
'torch.optim.adadelta.adadelta': 'torch.optim.adadelta.adadelta_function',
'torch.optim.adadelta.Adadelta': 'torch.optim.adadelta.Adadelta_class',
'torch.optim.adagrad.adagrad': 'torch.optim.adagrad.adagrad_function',
'torch.optim.adagrad.Adagrad': 'torch.optim.adagrad.Adagrad_class',
'torch.optim.adam.adam': 'torch.optim.adam.adam_function',
'torch.optim.adam.Adam': 'torch.optim.adam.Adam_class',
'torch.optim.adamax.adamax': 'torch.optim.adamax.adamax_function',
'torch.optim.adamax.Adamax': 'torch.optim.adamax.Adamax_class',
'torch.mtia.stream': 'torch.mtia.stream_function',
'torch.mtia.Stream': 'torch.mtia.Stream_class',
'torch.cpu.stream': 'torch.cpu.stream_function',
'torch.cpu.Stream': 'torch.cpu.Stream_class',
'torch.cuda.stream': 'torch.cuda.stream_function',
'torch.cuda.Stream': 'torch.cuda.Stream_class',
'torch.xpu.stream': 'torch.xpu.stream_function',
'torch.xpu.Stream': 'torch.xpu.Stream_class',
}
coverage_ignore_functions = [ coverage_ignore_functions = [
# torch # torch
"typename", "typename",

View File

@@ -21,10 +21,10 @@
The following operations will fill uninitialized memory when this setting is The following operations will fill uninitialized memory when this setting is
turned on: turned on:
* :func:`torch.Tensor.resize_` when called with a tensor that is not * :meth:`torch.Tensor.resize_` when called with a tensor that is not
quantized quantized
* :func:`torch.empty` * :func:`torch.empty`
* :func:`torch.empty_strided` * :func:`torch.empty_strided`
* :func:`torch.empty_permuted` * :func:`torch.empty_permuted`
* :func:`torch.empty_like` * :func:`torch.empty_like`
``` ```
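A short demonstration of the behavior this note documents; a sketch assuming default settings (`torch.utils.deterministic.fill_uninitialized_memory` defaults to `True`), with the exact fill values being an implementation detail:

```python
import torch

# With deterministic algorithms on and fill_uninitialized_memory at its
# default of True, "empty" tensors come back filled with a known value
# (NaN for floating point) instead of exposing stale memory.
torch.use_deterministic_algorithms(True)
x = torch.empty(3)
print(x)  # e.g. tensor([nan, nan, nan])
```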

View File

@@ -1409,7 +1409,7 @@ def use_deterministic_algorithms(
* :func:`torch.nn.functional.grid_sample` when attempting to differentiate a CUDA tensor * :func:`torch.nn.functional.grid_sample` when attempting to differentiate a CUDA tensor
* :func:`torch.cumsum` when called on a CUDA tensor when dtype is floating point or complex * :func:`torch.cumsum` when called on a CUDA tensor when dtype is floating point or complex
* :func:`torch.Tensor.scatter_reduce` when ``reduce='prod'`` and called on CUDA tensor * :func:`torch.Tensor.scatter_reduce` when ``reduce='prod'`` and called on CUDA tensor
* :func:`torch.Tensor.resize_` when called with a quantized tensor * :meth:`torch.Tensor.resize_` when called with a quantized tensor
In addition, several operations fill uninitialized memory when this setting In addition, several operations fill uninitialized memory when this setting
is turned on and when is turned on and when
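For context, a sketch of how this mode surfaces in practice: ops from the list above raise `RuntimeError` when no deterministic implementation exists, and `warn_only` downgrades that to a warning:

```python
import torch

# Strict mode: listed ops with no deterministic implementation raise
# RuntimeError when called.
torch.use_deterministic_algorithms(True)
assert torch.are_deterministic_algorithms_enabled()

# Relaxed mode: the same ops run but emit a warning instead.
torch.use_deterministic_algorithms(True, warn_only=True)
```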

View File

@@ -14181,7 +14181,7 @@ are freshly created instead of aliasing the input.
add_docstr( add_docstr(
torch.expand_copy, torch.expand_copy,
r""" r"""
Performs the same operation as :func:`torch.Tensor.expand`, but all output tensors Performs the same operation as :meth:`torch.Tensor.expand`, but all output tensors
are freshly created instead of aliasing the input. are freshly created instead of aliasing the input.
""", """,
) )
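The `:meth:` fix above is in the docstring for `torch.expand_copy`; the aliasing difference it describes is easy to see. A small sketch (shapes and values are illustrative):

```python
import torch

base = torch.tensor([[1.0], [2.0]])    # shape (2, 1)
view = base.expand(2, 3)                # aliases base's storage
copy = torch.expand_copy(base, (2, 3))  # freshly allocated output

base[0, 0] = 99.0
print(view[0, 1])  # tensor(99.) -- the expanded view sees the write
print(copy[0, 1])  # tensor(1.)  -- the copy does not
```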

View File

@@ -1999,7 +1999,7 @@ def gradcheck(
.. warning:: .. warning::
If any checked tensor in :attr:`input` has overlapping memory, i.e., If any checked tensor in :attr:`input` has overlapping memory, i.e.,
different indices pointing to the same memory address (e.g., from different indices pointing to the same memory address (e.g., from
:func:`torch.Tensor.expand`), this check will likely fail because the numerical :meth:`torch.Tensor.expand`), this check will likely fail because the numerical
gradients computed by point perturbation at such indices will change gradients computed by point perturbation at such indices will change
values at all other indices that share the same memory address. values at all other indices that share the same memory address.
@@ -2153,7 +2153,7 @@ def gradgradcheck(
.. warning:: .. warning::
If any checked tensor in :attr:`input` and :attr:`grad_outputs` has If any checked tensor in :attr:`input` and :attr:`grad_outputs` has
overlapping memory, i.e., different indices pointing to the same memory overlapping memory, i.e., different indices pointing to the same memory
address (e.g., from :func:`torch.Tensor.expand`), this check will likely fail address (e.g., from :meth:`torch.Tensor.expand`), this check will likely fail
because the numerical gradients computed by point perturbation at such because the numerical gradients computed by point perturbation at such
indices will change values at all other indices that share the same indices will change values at all other indices that share the same
memory address. memory address.
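A sketch of the failure mode these warnings describe, using `torch.sin` as a stand-in function; the expanded input makes every row alias the same storage, so the numerical perturbation gradcheck applies to one element moves all of them:

```python
import torch
from torch.autograd import gradcheck

x = torch.randn(1, 3, dtype=torch.double, requires_grad=True)
print(gradcheck(torch.sin, (x,)))  # True: no overlapping memory

# Every row of y aliases x's single row, which is exactly the overlapping
# memory case the warning covers:
y = x.expand(4, 3)
# gradcheck(torch.sin, (y,))  # expected to fail or error per the warning
```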