Mirror of https://github.com/pytorch/pytorch.git, synced 2025-10-20 12:54:11 +08:00

Document the rest of the specific optimizer module APIs (#158669)

Pull Request resolved: https://github.com/pytorch/pytorch/pull/158669
Approved by: https://github.com/albanD
ghstack dependencies: #158483

Commit 7cc5d03dfc (parent f73594164a), committed by PyTorch MergeBot.
@@ -1840,25 +1840,9 @@ coverage_ignore_functions = [
    "check_export_model_diff",
    "verify",
    "verify_aten_graph",
    # torch.optim.adamax
    "adamax",
    # torch.optim.adamw
    "adamw",
    # torch.optim.asgd
    "asgd",
    # torch.optim.nadam
    "nadam",
    # torch.optim.optimizer
    "register_optimizer_step_post_hook",
    "register_optimizer_step_pre_hook",
    # torch.optim.radam
    "radam",
    # torch.optim.rmsprop
    "rmsprop",
    # torch.optim.rprop
    "rprop",
    # torch.optim.sgd
    "sgd",
    # torch.optim.swa_utils
    "get_ema_avg_fn",
    "get_ema_multi_avg_fn",
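The `register_optimizer_step_pre_hook` / `register_optimizer_step_post_hook` entries above are the global step hooks defined in `torch.optim.optimizer` that this change un-ignores. A minimal usage sketch (the model, optimizer, and hook bodies here are illustrative, not part of the diff):

```python
import torch
from torch.optim.optimizer import (
    register_optimizer_step_post_hook,
    register_optimizer_step_pre_hook,
)

# Pre-hook: runs before every optimizer.step() call, for any optimizer.
# Returning None leaves the step's args/kwargs unchanged.
def log_pre_step(optimizer, args, kwargs):
    print(f"about to step {type(optimizer).__name__}")

# Post-hook: runs after every optimizer.step() call.
def log_post_step(optimizer, args, kwargs):
    print(f"finished step of {type(optimizer).__name__}")

pre_handle = register_optimizer_step_pre_hook(log_pre_step)
post_handle = register_optimizer_step_post_hook(log_post_step)

model = torch.nn.Linear(4, 2)
opt = torch.optim.SGD(model.parameters(), lr=0.1)
model(torch.randn(8, 4)).sum().backward()
opt.step()  # both global hooks fire around this call

# Registration returns RemovableHandle objects, so hooks can be removed.
pre_handle.remove()
post_handle.remove()
```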
@@ -3131,23 +3115,8 @@ coverage_ignore_classes = [
    "ReduceLROnPlateau",
    "SequentialLR",
    "StepLR",
    # torch.optim.nadam
    "NAdam",
    # torch.optim.optimizer
    "Optimizer",
    # torch.optim.radam
    "RAdam",
    # torch.optim.rmsprop
    "RMSprop",
    # torch.optim.rprop
    "Rprop",
    # torch.optim.sgd
    "SGD",
    # torch.optim.sparse_adam
    "SparseAdam",
    # torch.optim.swa_utils
    "AveragedModel",
    "SWALR",
    # torch.overrides
    "BaseTorchFunctionMode",
    "TorchFunctionMode",
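For context, `coverage_ignore_functions` and `coverage_ignore_classes` are `sphinx.ext.coverage` settings in `docs/source/conf.py`: names listed there are skipped when the coverage builder reports undocumented objects, so entries are deleted once the corresponding APIs gain real documentation pages. A minimal sketch of the pattern (the entries shown are illustrative placeholders, not values from this diff):

```python
# docs/source/conf.py (sketch) -- settings read by sphinx.ext.coverage
extensions = [
    "sphinx.ext.autodoc",
    "sphinx.ext.autosummary",
    "sphinx.ext.coverage",
]

# Names the coverage builder should not flag as undocumented.
# Once a module gets a real automodule/autosummary page (as in this PR),
# its names are removed from these lists so coverage checking applies again.
coverage_ignore_functions = [
    "some_private_helper",  # illustrative entry
]
coverage_ignore_classes = [
    "SomeInternalClass",  # illustrative entry
]
```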
@@ -34,3 +34,111 @@ The following are aliases to their counterparts in ``torch.optim`` in the nested
    Adam
    adam
```

```{eval-rst}
.. automodule:: torch.optim.adamax
.. currentmodule:: torch.optim.adamax
.. autosummary::
    :toctree: generated
    :nosignatures:

    Adamax
    adamax
```

```{eval-rst}
.. automodule:: torch.optim.adamw
.. currentmodule:: torch.optim.adamw
.. autosummary::
    :toctree: generated
    :nosignatures:

    AdamW
    adamw
```
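Each of these stub pages documents both the optimizer class (e.g. `AdamW`) and the module-level functional implementation it calls (e.g. `adamw`). A quick, illustrative use of the class API (model and hyperparameters are arbitrary):

```python
import torch

model = torch.nn.Linear(10, 1)
# Class API: the optimizer object owns the per-parameter state.
opt = torch.optim.AdamW(model.parameters(), lr=1e-3, weight_decay=1e-2)

x, y = torch.randn(32, 10), torch.randn(32, 1)
for _ in range(5):
    opt.zero_grad()
    loss = torch.nn.functional.mse_loss(model(x), y)
    loss.backward()
    opt.step()  # applies the AdamW update implemented by the functional API
```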
```{eval-rst}
.. automodule:: torch.optim.asgd
.. currentmodule:: torch.optim.asgd
.. autosummary::
    :toctree: generated
    :nosignatures:

    ASGD
    asgd
```

```{eval-rst}
.. automodule:: torch.optim.lbfgs
.. currentmodule:: torch.optim.lbfgs
.. autosummary::
    :toctree: generated
    :nosignatures:

    LBFGS
```
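LBFGS is the one optimizer in this list whose `step()` expects a closure that re-evaluates the loss, since it may need multiple function evaluations per step. A small sketch with an arbitrary quadratic objective:

```python
import torch

# Parameter to optimize and a simple quadratic objective (illustrative).
x = torch.randn(3, requires_grad=True)
opt = torch.optim.LBFGS([x], lr=1.0, max_iter=20)

def closure():
    # LBFGS may call this several times per .step() to re-evaluate the loss.
    opt.zero_grad()
    loss = ((x - 2.0) ** 2).sum()
    loss.backward()
    return loss

for _ in range(5):
    opt.step(closure)

print(x)  # should approach tensor([2., 2., 2.])
```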
```{eval-rst}
.. automodule:: torch.optim.nadam
.. currentmodule:: torch.optim.nadam
.. autosummary::
    :toctree: generated
    :nosignatures:

    NAdam
    nadam
```

```{eval-rst}
.. automodule:: torch.optim.radam
.. currentmodule:: torch.optim.radam
.. autosummary::
    :toctree: generated
    :nosignatures:

    RAdam
    radam
```

```{eval-rst}
.. automodule:: torch.optim.rmsprop
.. currentmodule:: torch.optim.rmsprop
.. autosummary::
    :toctree: generated
    :nosignatures:

    RMSprop
    rmsprop
```

```{eval-rst}
.. automodule:: torch.optim.rprop
.. currentmodule:: torch.optim.rprop
.. autosummary::
    :toctree: generated
    :nosignatures:

    Rprop
    rprop
```

```{eval-rst}
.. automodule:: torch.optim.sgd
.. currentmodule:: torch.optim.sgd
.. autosummary::
    :toctree: generated
    :nosignatures:

    SGD
    sgd
```

```{eval-rst}
.. automodule:: torch.optim.sparse_adam
.. currentmodule:: torch.optim.sparse_adam
.. autosummary::
    :toctree: generated
    :nosignatures:

    SparseAdam
```
@@ -688,18 +688,8 @@ We train the model for a total of 300 epochs and start to collect EMA averages i
<!-- This module needs to be documented. Adding here in the meantime
for tracking purposes -->
```{eval-rst}
.. py:module:: torch.optim.adamax
.. py:module:: torch.optim.adamw
.. py:module:: torch.optim.asgd
.. py:module:: torch.optim.lbfgs
.. py:module:: torch.optim.lr_scheduler
.. py:module:: torch.optim.nadam
.. py:module:: torch.optim.optimizer
.. py:module:: torch.optim.radam
.. py:module:: torch.optim.rmsprop
.. py:module:: torch.optim.rprop
.. py:module:: torch.optim.sgd
.. py:module:: torch.optim.sparse_adam
.. py:module:: torch.optim.swa_utils
```
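The hunk's context line refers to the EMA recipe in this document. For reference, the `torch.optim.swa_utils` helpers mentioned throughout this change are typically combined as in the following sketch (the decay value, model, and training loop are illustrative):

```python
import torch
from torch.optim.swa_utils import AveragedModel, get_ema_multi_avg_fn

model = torch.nn.Linear(16, 4)
opt = torch.optim.SGD(model.parameters(), lr=0.01)

# EMA copy of the model; get_ema_multi_avg_fn(decay) supplies the update rule
# ema = decay * ema + (1 - decay) * param, applied to all parameters at once.
ema_model = AveragedModel(model, multi_avg_fn=get_ema_multi_avg_fn(0.999))

for step in range(1000):
    x, y = torch.randn(8, 16), torch.randn(8, 4)
    opt.zero_grad()
    torch.nn.functional.mse_loss(model(x), y).backward()
    opt.step()
    if step >= 700:  # e.g. only start collecting averages late in training
        ema_model.update_parameters(model)
```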
@@ -21,7 +21,7 @@ from .optimizer import (
 )


-__all__: list[str] = ["Adagrad", "adagrad"]
+__all__ = ["Adagrad", "adagrad"]


 class Adagrad(Optimizer):
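For reference, `__all__` declares the module's public names, which is what `from torch.optim.adagrad import *` exposes; a tiny illustrative check (the construction at the end is arbitrary):

```python
import torch
import torch.optim.adagrad as adagrad_module

# The declared public API: the Adagrad class and the functional adagrad()
# that Adagrad.step() dispatches to.
print(adagrad_module.__all__)  # ['Adagrad', 'adagrad']

from torch.optim.adagrad import *  # star-import brings in exactly those names
opt = Adagrad([torch.nn.Parameter(torch.randn(2))], lr=0.1)
```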