[BE] Adding aliases for CUDA and XPU API documentation (#162984)

This PR reorganizes the CUDA and XPU API documentation by adding dedicated alias pages. Duplicate listings of APIs under torch.cuda are removed as a result.
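
For illustration, a minimal sketch of the aliasing these pages document (the functions shown are examples; assumes a CUDA-enabled build of PyTorch):

```python
import torch

# torch.cuda re-exports these functions from its nested submodules,
# so both spellings resolve to the same object.
assert torch.cuda.manual_seed is torch.cuda.random.manual_seed
assert torch.cuda.get_rng_state is torch.cuda.random.get_rng_state

# Either form seeds the RNG of the current CUDA device identically.
torch.cuda.manual_seed(0)         # top-level alias
torch.cuda.random.manual_seed(0)  # nested original
```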

Pull Request resolved: https://github.com/pytorch/pytorch/pull/162984
Approved by: https://github.com/janeyx99
Authored by Jiannan Wang on 2025-09-21 22:28:27 +00:00; committed by PyTorch MergeBot
parent 8b14f43da9
commit 6ac2b3ae35
5 changed files with 89 additions and 39 deletions

View File

@@ -509,10 +509,6 @@ coverage_ignore_functions = [
"custom_fwd",
# torch.cuda.amp.common
"amp_definitely_not_available",
# torch.cuda.graphs
"graph_pool_handle",
"is_current_stream_capturing",
"make_graphed_callables",
# torch.mtia.memory
"reset_peak_memory_stats",
# torch.cuda.nccl
@@ -524,25 +520,11 @@ coverage_ignore_functions = [
"reduce_scatter",
"unique_id",
"version",
# torch.cuda.nvtx
"range",
"range_end",
"range_start",
# torch.cuda.profiler
"init",
"profile",
"start",
"stop",
# torch.cuda.random
"get_rng_state",
"get_rng_state_all",
"initial_seed",
"manual_seed",
"manual_seed_all",
"seed",
"seed_all",
"set_rng_state",
"set_rng_state_all",
# torch.distributed.algorithms.ddp_comm_hooks.ddp_zero_hook
"hook_with_zero_step",
"hook_with_zero_step_interleaved",
@@ -2172,8 +2154,6 @@ coverage_ignore_classes = [
"EventHandler",
"SynchronizationError",
"UnsynchronizedAccessError",
# torch.cuda.memory
"MemPool",
# torch.distributed.elastic.multiprocessing.errors
"ChildFailedError",
"ProcessFailure",
@@ -2479,10 +2459,6 @@ coverage_ignore_classes = [
# torch.amp.grad_scaler
"GradScaler",
"OptState",
# torch.cuda.graphs
"CUDAGraph",
# torch.cuda.streams
"Event",
# torch.distributed.algorithms.ddp_comm_hooks.post_localSGD_hook
"PostLocalSGDState",
# torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook

View File

@@ -0,0 +1,47 @@
# Aliases in torch.cuda
The following APIs are aliases of their counterparts in ``torch.cuda``, defined in the nested namespaces listed below. For any of these APIs, feel free to use either the top-level version, such as ``torch.cuda.seed``, or the nested version, such as ``torch.cuda.random.seed``.
```{eval-rst}
.. automodule:: torch.cuda.random
.. currentmodule:: torch.cuda.random
.. autosummary::
    :toctree: generated
    :nosignatures:

    get_rng_state
    get_rng_state_all
    set_rng_state
    set_rng_state_all
    manual_seed
    manual_seed_all
    seed
    seed_all
    initial_seed
```
```{eval-rst}
.. automodule:: torch.cuda.graphs
.. currentmodule:: torch.cuda.graphs
.. autosummary::
    :toctree: generated
    :nosignatures:

    is_current_stream_capturing
    graph_pool_handle
    CUDAGraph
    graph
    make_graphed_callables
```
```{eval-rst}
.. automodule:: torch.cuda.streams
.. currentmodule:: torch.cuda.streams
.. autosummary::
    :toctree: generated
    :nosignatures:

    Stream
    ExternalStream
    Event
```
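
For context, a minimal usage sketch of the graph APIs aliased above (illustrative only; assumes a CUDA device and captured ops that need no warmup):

```python
import torch

# The class is the same object under either spelling.
assert torch.cuda.CUDAGraph is torch.cuda.graphs.CUDAGraph

static_in = torch.zeros(8, device="cuda")
g = torch.cuda.CUDAGraph()
with torch.cuda.graph(g):  # record the work into the graph
    static_out = static_in * 2

static_in.fill_(3.0)  # update the captured input buffer in place
g.replay()            # re-run the recorded kernels
print(static_out)     # tensor of 6.0s
```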

View File

@@ -274,10 +274,6 @@ See the docs for {class}`~torch.cuda.gds.GdsFile` for an example of how to use t
.. py:module:: torch.cuda.gds
```
```{eval-rst}
.. py:module:: torch.cuda.graphs
```
```{eval-rst}
.. py:module:: torch.cuda.jiterator
```
@@ -294,14 +290,13 @@ See the docs for {class}`~torch.cuda.gds.GdsFile` for an example of how to use t
.. py:module:: torch.cuda.profiler
```
```{eval-rst}
.. py:module:: torch.cuda.random
```
```{eval-rst}
.. py:module:: torch.cuda.sparse
```
```{eval-rst}
.. py:module:: torch.cuda.streams
.. toctree::
    :hidden:

    cuda.aliases.md
```

View File

@@ -0,0 +1,32 @@
# Aliases in torch.xpu
The following APIs are aliases of their counterparts in ``torch.xpu``, defined in the nested namespaces listed below. For any of these APIs, feel free to use either the top-level version, such as ``torch.xpu.seed``, or the nested version, such as ``torch.xpu.random.seed``.
```{eval-rst}
.. automodule:: torch.xpu.random
.. currentmodule:: torch.xpu.random
.. autosummary::
    :toctree: generated
    :nosignatures:

    get_rng_state
    get_rng_state_all
    initial_seed
    manual_seed
    manual_seed_all
    seed
    seed_all
    set_rng_state
    set_rng_state_all
```
```{eval-rst}
.. automodule:: torch.xpu.streams
.. currentmodule:: torch.xpu.streams
.. autosummary::
    :toctree: generated
    :nosignatures:

    Event
    Stream
```
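
The same top-level/nested relationship holds for ``torch.xpu``; a minimal sketch (assumes an XPU-enabled build):

```python
import torch

# The nested definitions are re-exported at the top level of torch.xpu,
# so both spellings name the same objects.
assert torch.xpu.Event is torch.xpu.streams.Event
assert torch.xpu.Stream is torch.xpu.streams.Stream
assert torch.xpu.manual_seed is torch.xpu.random.manual_seed
```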

View File

@@ -86,9 +86,9 @@
reset_peak_memory_stats
```
<!-- This module needs to be documented. Adding here in the meantime
for tracking purposes -->
```{eval-rst}
.. py:module:: torch.xpu.random
.. py:module:: torch.xpu.streams
.. toctree::
    :hidden:

    xpu.aliases.md
```