Mirror of https://github.com/pytorch/pytorch.git
Formatting changes for gradient scaling (#33832)
Summary: This is hard to get right locally... I can build the docs, but never quite match what they look like live. The bullet point indentation was just an oversight. This also removes the `Returns:` formatting tabs, because they take up a lot of space when rendered and add no clarity. Some functions in PyTorch [do use them](https://pytorch.org/docs/master/torch.html#torch.eye), but [many don't bother](https://pytorch.org/docs/master/torch.html#torch.is_tensor), so apparently some people share my feelings (not using them is in line with existing practice).

Pull Request resolved: https://github.com/pytorch/pytorch/pull/33832
Differential Revision: D20135581
Pulled By: ngimel
fbshipit-source-id: bc788a7e57b142f95c4fa5baf3fe01f94c45abd8
Committed by: Facebook Github Bot
Parent: 5dde8cd483
Commit: a726827ec8
@@ -125,11 +125,11 @@ class GradScaler(object):
         """
         Multiplies ('scales') a tensor or list of tensors by the scale factor.
 
+        Returns scaled outputs. If this instance of :class:`GradScaler` is not enabled, outputs are returned
+        unmodified.
+
         Arguments:
             outputs (Tensor or iterable of Tensors): Outputs to scale.
-
-        Returns:
-            Scaled outputs. If this instance of :class:`GradScaler` is not enabled, outputs are returned unmodified.
         """
         if not self._enabled:
             return outputs
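For context (not part of the diff): a minimal sketch of how ``scale`` is used in practice, assuming a CUDA device is available; the model and optimizer here are placeholders.

```python
import torch
from torch.cuda.amp import GradScaler

# Placeholder model/optimizer, purely for illustration.
model = torch.nn.Linear(8, 1).cuda()
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
scaler = GradScaler()

inputs = torch.randn(4, 8, device="cuda")
loss = model(inputs).sum()

# scale() multiplies the loss by the current scale factor, so the
# gradients produced by backward() carry the same factor.
scaler.scale(loss).backward()
```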
@@ -234,14 +234,13 @@ class GradScaler(object):
 
         ``*args`` and ``**kwargs`` are forwarded to ``optimizer.step()``.
 
+        Returns the return value of ``optimizer.step(*args, **kwargs)``.
+
         Arguments:
             optimizer (torch.optim.Optimizer): Optimizer that applies the gradients.
             args: Any arguments.
             kwargs: Any keyword arguments.
 
-        Returns:
-            The return value of ``optimizer.step(*args, **kwargs)``.
-
         .. warning::
             Closure use is not currently supported.
         """
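Continuing the sketch above, the ``step``/``update`` pair this docstring describes; the comments summarize the documented behavior rather than anything new in this diff.

```python
# step() unscales the gradients (if unscale_() wasn't already called),
# skips optimizer.step() if any gradient is inf/NaN, and otherwise
# forwards its arguments to optimizer.step().
scaler.step(optimizer)

# update() recomputes the scale for the next iteration: backed off
# after a skipped step, grown after growth_interval unskipped steps.
scaler.update()
optimizer.zero_grad()
```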
@@ -342,8 +341,7 @@ class GradScaler(object):
 
     def get_scale(self):
         """
-        Returns:
-            A Python float containing the current scale, or 1.0 if scaling is disabled.
+        Returns a Python float containing the current scale, or 1.0 if scaling is disabled.
 
         .. warning::
             :meth:`get_scale` incurs a CPU-GPU sync.
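A quick illustrative use of the getter touched here; because of the documented CPU-GPU sync, it is best reserved for occasional logging.

```python
# Incurs a CPU-GPU sync per the warning above; query sparingly.
print("current scale:", scaler.get_scale())  # e.g. 65536.0 initially
```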
@@ -355,8 +353,7 @@ class GradScaler(object):
 
     def get_growth_factor(self):
         r"""
-        Returns:
-            A Python float containing the scale growth factor.
+        Returns a Python float containing the scale growth factor.
         """
         return self._growth_factor
 
@@ -369,8 +366,7 @@ class GradScaler(object):
 
     def get_backoff_factor(self):
         r"""
-        Returns:
-            A Python float containing the scale backoff factor.
+        Returns a Python float containing the scale backoff factor.
         """
         return self._backoff_factor
 
@@ -383,8 +379,7 @@ class GradScaler(object):
 
     def get_growth_interval(self):
         r"""
-        Returns:
-            A Python int containing the growth interval.
+        Returns a Python int containing the growth interval.
         """
         return self._growth_interval
 
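The three getter hunks above (growth factor, backoff factor, growth interval) mirror the ``GradScaler`` constructor arguments. A minimal sketch, passing the documented defaults explicitly so the round trip is self-evident:

```python
from torch.cuda.amp import GradScaler

scaler = GradScaler(init_scale=65536.0, growth_factor=2.0,
                    backoff_factor=0.5, growth_interval=2000)

# Each getter simply returns the corresponding constructor argument.
assert scaler.get_growth_factor() == 2.0
assert scaler.get_backoff_factor() == 0.5
assert scaler.get_growth_interval() == 2000
```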
@@ -403,8 +398,7 @@ class GradScaler(object):
 
     def is_enabled(self):
         r"""
-        Returns:
-            A bool indicating whether this instance is enabled.
+        Returns a bool indicating whether this instance is enabled.
         """
         return self._enabled
 
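Finally, ``is_enabled`` reflects the ``enabled`` constructor flag. When it is False, ``scale`` returns its input unmodified (as the first hunk shows), so the same training-loop code runs with scaling on or off. A short sketch:

```python
import torch
from torch.cuda.amp import GradScaler

disabled = GradScaler(enabled=False)
assert not disabled.is_enabled()

# With scaling disabled, scale() is a passthrough: it returns the
# very object it was given (see `if not self._enabled: return outputs`).
t = torch.ones(3)
assert disabled.scale(t) is t
```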