[Doc] Update docstrings for torch/random.py (#125265)

Updates the docstrings in torch/random.py to clarify which device's RNG each function operates on.

While trying to understand the difference between
```
state = torch.random.get_rng_state()
some_code
torch.random.set_rng_state(state)
```
and
```
with torch.random.fork_rng():
    some_code
```
I found that there was already a note about this in the docstring, but the malformed `.. note:` directive (single colon instead of `.. note::`) kept it from rendering on the website. I fixed that note and added clarifications to the docstrings of other functions in this file.
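
For reference, a runnable sketch of the two patterns (CPU shown; with CUDA present, `fork_rng` also saves and restores the RNG state of the devices it is given):

```python
import torch

torch.manual_seed(0)
before = torch.random.get_rng_state()

# Pattern 1: manually save and restore the default CPU generator's state.
state = torch.random.get_rng_state()
_ = torch.randn(3)                 # advances the CPU RNG
torch.random.set_rng_state(state)  # rewinds it

# Pattern 2: the context manager forks the CPU RNG (and the RNGs of any
# CUDA devices it is given) and restores everything on exit.
with torch.random.fork_rng():
    _ = torch.randn(3)

# Either way, the CPU RNG ends up exactly where it started.
assert torch.equal(before, torch.random.get_rng_state())
```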

Test Plan:
Built the docs and verified that everything renders correctly.

<img width="911" alt="Screenshot 2024-04-30 at 2 22 08 PM" src="https://github.com/pytorch/pytorch/assets/9263852/f219bc35-89bd-4f5b-ba60-255b089499a4">

<img width="901" alt="Screenshot 2024-04-30 at 2 22 13 PM" src="https://github.com/pytorch/pytorch/assets/9263852/c141e7fa-afc9-4c66-b460-96668ce35606">

Pull Request resolved: https://github.com/pytorch/pytorch/pull/125265
Approved by: https://github.com/Balandat, https://github.com/lezcano

```diff
@@ -9,8 +9,8 @@ import torch
 def set_rng_state(new_state: torch.Tensor) -> None:
     r"""Sets the random number generator state.
 
-    .. note: This function only works for CPU. For CUDA, please use
-        torch.manual_seed(seed), which works for both CPU and CUDA.
+    .. note:: This function only works for CPU. For CUDA, please use
+        :func:`torch.manual_seed`, which works for both CPU and CUDA.
 
     Args:
         new_state (torch.ByteTensor): The desired state
```
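
To illustrate the note above (this example is not part of the diff; `torch.cuda.get_rng_state` / `torch.cuda.set_rng_state` are the existing CUDA counterparts):

```python
import torch

# CPU: torch.random.get_rng_state / set_rng_state touch the default CPU generator.
cpu_state = torch.random.get_rng_state()
torch.random.set_rng_state(cpu_state)

# CUDA: use the device-specific counterparts...
if torch.cuda.is_available():
    cuda_state = torch.cuda.get_rng_state()   # default CUDA device
    torch.cuda.set_rng_state(cuda_state)

# ...or torch.manual_seed, which seeds the CPU and every CUDA device at once.
torch.manual_seed(1234)
```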
```diff
@@ -19,12 +19,17 @@ def set_rng_state(new_state: torch.Tensor) -> None:
 
 
 def get_rng_state() -> torch.Tensor:
-    r"""Returns the random number generator state as a `torch.ByteTensor`."""
+    r"""Returns the random number generator state as a `torch.ByteTensor`.
+
+    .. note:: The returned state is for the default generator on CPU only.
+
+    See also: :func:`torch.random.fork_rng`.
+    """
     return default_generator.get_state()
 
 
 def manual_seed(seed) -> torch._C.Generator:
-    r"""Sets the seed for generating random numbers. Returns a
+    r"""Sets the seed for generating random numbers on all devices. Returns a
     `torch.Generator` object.
 
     Args:
```
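
A quick illustration of the "on all devices" wording (not part of the diff): reseeding with `torch.manual_seed` makes subsequent draws reproducible, while `get_rng_state` only reflects the default CPU generator:

```python
import torch

torch.manual_seed(7)                      # seeds the CPU and every CUDA device
cpu_state = torch.random.get_rng_state()  # CPU generator state only
a = torch.randn(2)

torch.manual_seed(7)                      # reseeding reproduces the draw
assert torch.equal(a, torch.randn(2))
```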
```diff
@@ -54,7 +59,7 @@ def manual_seed(seed) -> torch._C.Generator:
 
 def seed() -> int:
     r"""Sets the seed for generating random numbers to a non-deterministic
-    random number. Returns a 64 bit number used to seed the RNG.
+    random number on all devices. Returns a 64 bit number used to seed the RNG.
     """
     seed = default_generator.seed()
     import torch.cuda
```
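
For context (not part of the diff), the returned value can be replayed with `torch.manual_seed` to reproduce an otherwise non-deterministic run:

```python
import torch

s = torch.seed()      # non-deterministic 64-bit seed, applied to all devices
x = torch.randn(4)

torch.manual_seed(s)  # replaying the returned seed reproduces the draw
assert torch.equal(x, torch.randn(4))
```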
```diff
@@ -101,6 +106,8 @@ def _seed_custom_device(seed) -> None:
 def initial_seed() -> int:
     r"""Returns the initial seed for generating random numbers as a
     Python `long`.
+
+    .. note:: The returned seed is for the default generator on CPU only.
     """
     return default_generator.initial_seed()
```
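
A minimal illustration (not part of the diff) of what `initial_seed` reports:

```python
import torch

torch.manual_seed(42)
assert torch.initial_seed() == 42  # reflects the default CPU generator only
```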
```diff
@@ -123,7 +130,7 @@ def fork_rng(devices=None, enabled=True, _caller="fork_rng", _devices_kw="devices", device_type="cuda"):
         enabled (bool): if ``False``, the RNG is not forked. This is a convenience
             argument for easily disabling the context manager without having
             to delete it and unindent your Python code under it.
-        deivce_type (str): device type str, default is `cuda`. As for custom device,
+        device_type (str): device type str, default is `cuda`. As for custom device,
             see details in [Note: support the custom device with privateuse1]
     """
```