Mirror of https://github.com/pytorch/pytorch.git
This PR fixes several bugs, listed in priority:

1. `load_state_dict` with a non-tensor `step` was incorrect for the capturable and fused implementations, since we didn't create the tensors on the right device in `__setstate__`. This has been fixed (a repro sketch follows the logs below).
2. The most recently added capturable implementations forgot the check that all tensors should be on CUDA for eager. We've now added those checks.
3. The most recent change in Adamax only adds capturable for foreach but would be silently incorrect for forloop/single-tensor. I've added erroring and modified testing with many, many skips for that. Honestly, this PR has only further cemented my preference that we do the single-tensor and multi-tensor capturable implementations together in the future. @mlazos
4. The conditional for adding CUDA-supported configs to the optimizer infos was incorrect, so we hadn't been testing capturable at all! This has also been rectified and was the trigger for this PR in the first place.
5. In a similar way, the conditional for `_get_optim_inputs_including_global_cliquey_kwargs` was sometimes incorrect as well. This has also been corrected.

The following is not a bug, just something to make life simpler by not needing to handle Nones: `optim_input_funcs` must now mandatorily take in a `device`, which can be a string or a torch.device.

Details for posterity:

4. Running the test_foreach_matches_forloop test and printing the configs that get run shows capturable being included, which is correct.

```
(pytorch-3.10) [janeyx@devgpu023.odn1 ~/local/pytorch (5d50138f)]$ python test/test_optim.py -k test_foreach_matches_forloop_AdamW_cuda
/home/janeyx/.conda/envs/pytorch-3.10/lib/python3.10/site-packages/transformers/utils/generic.py:441: UserWarning: torch.utils._pytree._register_pytree_node is deprecated. Please use torch.utils._pytree.register_pytree_node instead.
  _torch_pytree._register_pytree_node(
/home/janeyx/.conda/envs/pytorch-3.10/lib/python3.10/site-packages/scipy/__init__.py:146: UserWarning: A NumPy version >=1.17.3 and <1.25.0 is required for this version of SciPy (detected version 1.26.0
  warnings.warn(f"A NumPy version >={np_minversion} and <{np_maxversion}"
params=None, kwargs={}, desc=default
params=None, kwargs={'lr': 0.01}, desc=non-default lr
params=None, kwargs={'weight_decay': 0.1}, desc=nonzero weight_decay
params=None, kwargs={'weight_decay': 0.1, 'maximize': True}, desc=maximize
params=None, kwargs={'weight_decay': 0.1, 'amsgrad': True}, desc=amsgrad
params=None, kwargs={'capturable': True}, desc=capturable
params=None, kwargs={'weight_decay': 0.1, 'amsgrad': True, 'capturable': True}, desc=capturable, amsgrad
params=None, kwargs={'lr': tensor(0.0010), 'amsgrad': True, 'capturable': True}, desc=Tensor lr with capturable and amsgrad
.
----------------------------------------------------------------------
Ran 1 test in 19.229s

OK
```

5. Running the test_optimizer_can_be_printed test (which calls `_get_optim_inputs_including_global_cliquey_kwargs`) and printing what gets run is also now correct.
```
/home/janeyx/.conda/envs/pytorch-3.10/lib/python3.10/site-packages/scipy/__init__.py:146: UserWarning: A NumPy version >=1.17.3 and <1.25.0 is required for this version of SciPy (detected version 1.26.0
  warnings.warn(f"A NumPy version >={np_minversion} and <{np_maxversion}"
params=None, kwargs={'differentiable': False}, desc=default
params=None, kwargs={'differentiable': True}, desc=default & differentiable
params=None, kwargs={'lr': 0.01, 'differentiable': False}, desc=non-default lr
params=None, kwargs={'lr': 0.01, 'differentiable': True}, desc=non-default lr & differentiable
params=None, kwargs={'weight_decay': 0.1, 'differentiable': False}, desc=nonzero weight_decay
params=None, kwargs={'weight_decay': 0.1, 'differentiable': True}, desc=nonzero weight_decay & differentiable
params=None, kwargs={'weight_decay': 0.1, 'maximize': True, 'differentiable': False}, desc=maximize
params=None, kwargs={'weight_decay': 0.1, 'maximize': True, 'differentiable': True}, desc=maximize & differentiable
params=None, kwargs={'weight_decay': 0.1, 'amsgrad': True, 'differentiable': False}, desc=amsgrad
params=None, kwargs={'weight_decay': 0.1, 'amsgrad': True, 'differentiable': True}, desc=amsgrad & differentiable
.
params=None, kwargs={'foreach': False, 'differentiable': False, 'fused': False}, desc=default
params=None, kwargs={'foreach': True, 'differentiable': False, 'fused': False}, desc=default & foreach
params=None, kwargs={'foreach': False, 'differentiable': True, 'fused': False}, desc=default & differentiable
params=None, kwargs={'foreach': False, 'differentiable': False, 'fused': True}, desc=default & fused
params=None, kwargs={'lr': 0.01, 'foreach': False, 'differentiable': False, 'fused': False}, desc=non-default lr
params=None, kwargs={'lr': 0.01, 'foreach': True, 'differentiable': False, 'fused': False}, desc=non-default lr & foreach
params=None, kwargs={'lr': 0.01, 'foreach': False, 'differentiable': True, 'fused': False}, desc=non-default lr & differentiable
params=None, kwargs={'lr': 0.01, 'foreach': False, 'differentiable': False, 'fused': True}, desc=non-default lr & fused
params=None, kwargs={'weight_decay': 0.1, 'foreach': False, 'differentiable': False, 'fused': False}, desc=nonzero weight_decay
params=None, kwargs={'weight_decay': 0.1, 'foreach': True, 'differentiable': False, 'fused': False}, desc=nonzero weight_decay & foreach
params=None, kwargs={'weight_decay': 0.1, 'foreach': False, 'differentiable': True, 'fused': False}, desc=nonzero weight_decay & differentiable
params=None, kwargs={'weight_decay': 0.1, 'foreach': False, 'differentiable': False, 'fused': True}, desc=nonzero weight_decay & fused
params=None, kwargs={'weight_decay': 0.1, 'maximize': True, 'foreach': False, 'differentiable': False, 'fused': False}, desc=maximize
params=None, kwargs={'weight_decay': 0.1, 'maximize': True, 'foreach': True, 'differentiable': False, 'fused': False}, desc=maximize & foreach
params=None, kwargs={'weight_decay': 0.1, 'maximize': True, 'foreach': False, 'differentiable': True, 'fused': False}, desc=maximize & differentiable
params=None, kwargs={'weight_decay': 0.1, 'maximize': True, 'foreach': False, 'differentiable': False, 'fused': True}, desc=maximize & fused
params=None, kwargs={'weight_decay': 0.1, 'amsgrad': True, 'foreach': False, 'differentiable': False, 'fused': False}, desc=amsgrad
params=None, kwargs={'weight_decay': 0.1, 'amsgrad': True, 'foreach': True, 'differentiable': False, 'fused': False}, desc=amsgrad & foreach
params=None, kwargs={'weight_decay': 0.1, 'amsgrad': True, 'foreach': False, 'differentiable': True, 'fused': False}, desc=amsgrad & differentiable
params=None, kwargs={'weight_decay': 0.1, 'amsgrad': True, 'foreach': False, 'differentiable': False, 'fused': True}, desc=amsgrad & fused
params=None, kwargs={'capturable': True, 'foreach': False, 'differentiable': False, 'fused': False}, desc=capturable
params=None, kwargs={'capturable': True, 'foreach': True, 'differentiable': False, 'fused': False}, desc=capturable & foreach
params=None, kwargs={'capturable': True, 'foreach': False, 'differentiable': True, 'fused': False}, desc=capturable & differentiable
params=None, kwargs={'capturable': True, 'foreach': False, 'differentiable': False, 'fused': True}, desc=capturable & fused
params=None, kwargs={'weight_decay': 0.1, 'amsgrad': True, 'capturable': True, 'foreach': False, 'differentiable': False, 'fused': False}, desc=capturable, amsgrad
params=None, kwargs={'weight_decay': 0.1, 'amsgrad': True, 'capturable': True, 'foreach': True, 'differentiable': False, 'fused': False}, desc=capturable, amsgrad & foreach
params=None, kwargs={'weight_decay': 0.1, 'amsgrad': True, 'capturable': True, 'foreach': False, 'differentiable': True, 'fused': False}, desc=capturable, amsgrad & differentiable
params=None, kwargs={'weight_decay': 0.1, 'amsgrad': True, 'capturable': True, 'foreach': False, 'differentiable': False, 'fused': True}, desc=capturable, amsgrad & fused
params=None, kwargs={'lr': tensor(0.0010), 'amsgrad': True, 'capturable': True, 'foreach': False, 'differentiable': False, 'fused': False}, desc=Tensor lr with capturable and amsgrad
params=None, kwargs={'lr': tensor(0.0010), 'amsgrad': True, 'capturable': True, 'foreach': True, 'differentiable': False, 'fused': False}, desc=Tensor lr with capturable and amsgrad & foreach
params=None, kwargs={'lr': tensor(0.0010), 'amsgrad': True, 'capturable': True, 'foreach': False, 'differentiable': True, 'fused': False}, desc=Tensor lr with capturable and amsgrad & differentiable
params=None, kwargs={'lr': tensor(0.0010), 'amsgrad': True, 'capturable': True, 'foreach': False, 'differentiable': False, 'fused': True}, desc=Tensor lr with capturable and amsgrad & fused
.
----------------------------------------------------------------------
Ran 2 tests in 11.112s

OK
```

Pull Request resolved: https://github.com/pytorch/pytorch/pull/118326
Approved by: https://github.com/mlazos
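To make bug 1 concrete, here is a hypothetical repro sketch (the model, shapes, and checkpoint mangling are illustrative, not taken from the PR). With `capturable=True`, a `step` value that was serialized as a plain float must be recreated on the parameter's device when restored; before this fix it landed on CPU, so later capturable math mixed CPU and CUDA tensors:

```python
import torch

# Assumes a CUDA device is available.
model = torch.nn.Linear(2, 2).cuda()
opt = torch.optim.Adam(model.parameters(), capturable=True)
model(torch.randn(1, 2, device="cuda")).sum().backward()
opt.step()

sd = opt.state_dict()
for s in sd["state"].values():
    s["step"] = float(s["step"])  # simulate an old checkpoint with a non-tensor step

opt2 = torch.optim.Adam(model.parameters(), capturable=True)
opt2.load_state_dict(sd)
for p, s in opt2.state.items():
    assert s["step"].device == p.device  # step must live with the param when capturable
```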
661 lines · 29 KiB · Python
from typing import List, Optional, Union, Tuple

import torch
from torch import Tensor
from .optimizer import (Optimizer, ParamsT, _use_grad_for_differentiable, _get_value,
                        _stack_if_compiling, _dispatch_sqrt, _default_to_fused_or_foreach,
                        _get_scalar_dtype, _capturable_doc, _differentiable_doc, _foreach_doc,
                        _fused_doc, _maximize_doc, _view_as_real)
from torch.utils._foreach_utils import _get_fused_kernels_supported_devices

__all__ = ['Adam', 'adam']


class Adam(Optimizer):
    def __init__(self,
                 params: ParamsT,
                 lr: Union[float, Tensor] = 1e-3,
                 betas: Tuple[float, float] = (0.9, 0.999),
                 eps: float = 1e-8,
                 weight_decay: float = 0,
                 amsgrad: bool = False,
                 *,
                 foreach: Optional[bool] = None,
                 maximize: bool = False,
                 capturable: bool = False,
                 differentiable: bool = False,
                 fused: Optional[bool] = None):
        if not 0.0 <= lr:
            raise ValueError(f"Invalid learning rate: {lr}")
        if isinstance(lr, Tensor) and foreach and not capturable:
            raise ValueError("lr as a Tensor is not supported for capturable=False and foreach=True")
        if not 0.0 <= eps:
            raise ValueError(f"Invalid epsilon value: {eps}")
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError(f"Invalid beta parameter at index 0: {betas[0]}")
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError(f"Invalid beta parameter at index 1: {betas[1]}")
        if not 0.0 <= weight_decay:
            raise ValueError(f"Invalid weight_decay value: {weight_decay}")

        defaults = dict(lr=lr, betas=betas, eps=eps,
                        weight_decay=weight_decay, amsgrad=amsgrad,
                        maximize=maximize, foreach=foreach, capturable=capturable,
                        differentiable=differentiable, fused=fused)
        super().__init__(params, defaults)

        if fused:
            if differentiable:
                raise RuntimeError("`fused` does not support `differentiable`")
            self._step_supports_amp_scaling = True
            # TODO(crcrpar): [low prec params & their higher prec copy]
            # Support AMP with FP16/BF16 model params which would need
            # higher prec copy of params to do update math in higher prec to
            # alleviate the loss of information.
            fused_supported_devices = _get_fused_kernels_supported_devices()
            if not all(
                p.device.type in fused_supported_devices and
                torch.is_floating_point(p) for pg in self.param_groups for p in pg['params']
            ):
                raise RuntimeError("`fused=True` requires all the params to be floating point Tensors of "
                                   f"supported devices: {fused_supported_devices}.")
            if foreach:
                raise RuntimeError("`fused` and `foreach` cannot be `True` together.")

    def __setstate__(self, state):
        super().__setstate__(state)
        for group in self.param_groups:
            group.setdefault('amsgrad', False)
            group.setdefault('maximize', False)
            group.setdefault('foreach', None)
            group.setdefault('capturable', False)
            group.setdefault('differentiable', False)
            fused = group.setdefault('fused', None)
            for p in group["params"]:
                p_state = self.state.get(p, [])
                if len(p_state) != 0 and not torch.is_tensor(p_state['step']):
                    step_val = float(p_state["step"])
                    p_state["step"] = (torch.tensor(step_val, dtype=_get_scalar_dtype(is_fused=fused), device=p.device)
                                       if group['capturable'] or group['fused']
                                       else torch.tensor(step_val, dtype=_get_scalar_dtype()))

    def _init_group(
        self,
        group,
        params_with_grad,
        grads,
        exp_avgs,
        exp_avg_sqs,
        max_exp_avg_sqs,
        state_steps
    ):
        has_complex = False
        for p in group['params']:
            if p.grad is not None:
                has_complex |= torch.is_complex(p)
                params_with_grad.append(p)
                if p.grad.is_sparse:
                    raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
                grads.append(p.grad)

                state = self.state[p]
                # Lazy state initialization
                if len(state) == 0:
                    # note(crcrpar): [special device hosting for step]
                    # Deliberately host `step` on CPU if both capturable and fused are off.
                    # This is because kernel launches are costly on CUDA and XLA.
                    state['step'] = (
                        torch.zeros((), dtype=_get_scalar_dtype(is_fused=group['fused']), device=p.device)
                        if group['capturable'] or group['fused']
                        else torch.tensor(0.0, dtype=_get_scalar_dtype())
                    )
                    # Exponential moving average of gradient values
                    state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                    # Exponential moving average of squared gradient values
                    state['exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                    if group['amsgrad']:
                        # Maintains max of all exp. moving avg. of sq. grad. values
                        state['max_exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)

                exp_avgs.append(state['exp_avg'])
                exp_avg_sqs.append(state['exp_avg_sq'])

                if group['amsgrad']:
                    max_exp_avg_sqs.append(state['max_exp_avg_sq'])
                if group['differentiable'] and state['step'].requires_grad:
                    raise RuntimeError('`requires_grad` is not supported for `step` in differentiable mode')

                # Foreach without capturable does not support a tensor lr
                if group['foreach'] and torch.is_tensor(group['lr']) and not group['capturable']:
                    raise RuntimeError('lr as a Tensor is not supported for capturable=False and foreach=True')

                state_steps.append(state['step'])
        return has_complex

    @_use_grad_for_differentiable
    def step(self, closure=None):
        """Perform a single optimization step.

        Args:
            closure (Callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        self._cuda_graph_capture_health_check()

        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()

        for group in self.param_groups:
            params_with_grad = []
            grads = []
            exp_avgs = []
            exp_avg_sqs = []
            max_exp_avg_sqs = []
            state_steps = []
            beta1, beta2 = group['betas']

            has_complex = self._init_group(
                group,
                params_with_grad,
                grads,
                exp_avgs,
                exp_avg_sqs,
                max_exp_avg_sqs,
                state_steps)

            adam(
                params_with_grad,
                grads,
                exp_avgs,
                exp_avg_sqs,
                max_exp_avg_sqs,
                state_steps,
                amsgrad=group['amsgrad'],
                has_complex=has_complex,
                beta1=beta1,
                beta2=beta2,
                lr=group['lr'],
                weight_decay=group['weight_decay'],
                eps=group['eps'],
                maximize=group['maximize'],
                foreach=group['foreach'],
                capturable=group['capturable'],
                differentiable=group['differentiable'],
                fused=group['fused'],
                grad_scale=getattr(self, "grad_scale", None),
                found_inf=getattr(self, "found_inf", None),
            )

        return loss


Adam.__doc__ = r"""Implements Adam algorithm.

    .. math::
       \begin{aligned}
            &\rule{110mm}{0.4pt} \\
            &\textbf{input} : \gamma \text{ (lr)}, \beta_1, \beta_2
                \text{ (betas)},\theta_0 \text{ (params)},f(\theta) \text{ (objective)} \\
            &\hspace{13mm} \lambda \text{ (weight decay)}, \: \textit{amsgrad},
                \:\textit{maximize} \\
            &\textbf{initialize} : m_0 \leftarrow 0 \text{ ( first moment)},
                v_0\leftarrow 0 \text{ (second moment)},\: \widehat{v_0}^{max}\leftarrow 0\\[-1.ex]
            &\rule{110mm}{0.4pt} \\
            &\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do} \\

            &\hspace{5mm}\textbf{if} \: \textit{maximize}: \\
            &\hspace{10mm}g_t \leftarrow -\nabla_{\theta} f_t (\theta_{t-1}) \\
            &\hspace{5mm}\textbf{else} \\
            &\hspace{10mm}g_t \leftarrow \nabla_{\theta} f_t (\theta_{t-1}) \\
            &\hspace{5mm}\textbf{if} \: \lambda \neq 0 \\
            &\hspace{10mm} g_t \leftarrow g_t + \lambda \theta_{t-1} \\
            &\hspace{5mm}m_t \leftarrow \beta_1 m_{t-1} + (1 - \beta_1) g_t \\
            &\hspace{5mm}v_t \leftarrow \beta_2 v_{t-1} + (1-\beta_2) g^2_t \\
            &\hspace{5mm}\widehat{m_t} \leftarrow m_t/\big(1-\beta_1^t \big) \\
            &\hspace{5mm}\widehat{v_t} \leftarrow v_t/\big(1-\beta_2^t \big) \\
            &\hspace{5mm}\textbf{if} \: amsgrad \\
            &\hspace{10mm}\widehat{v_t}^{max} \leftarrow \mathrm{max}(\widehat{v_t}^{max},
                \widehat{v_t}) \\
            &\hspace{10mm}\theta_t \leftarrow \theta_{t-1} - \gamma \widehat{m_t}/
                \big(\sqrt{\widehat{v_t}^{max}} + \epsilon \big) \\
            &\hspace{5mm}\textbf{else} \\
            &\hspace{10mm}\theta_t \leftarrow \theta_{t-1} - \gamma \widehat{m_t}/
                \big(\sqrt{\widehat{v_t}} + \epsilon \big) \\
            &\rule{110mm}{0.4pt} \\[-1.ex]
            &\bf{return} \: \theta_t \\[-1.ex]
            &\rule{110mm}{0.4pt} \\[-1.ex]
       \end{aligned}

    For further details regarding the algorithm we refer to `Adam: A Method for Stochastic Optimization`_.
    """ + fr"""
    Args:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, Tensor, optional): learning rate (default: 1e-3). A tensor LR
            is not yet supported for all our implementations. Please use a float
            LR if you are not also specifying fused=True or capturable=True.
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of gradient and its square (default: (0.9, 0.999))
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-8)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        amsgrad (bool, optional): whether to use the AMSGrad variant of this
            algorithm from the paper `On the Convergence of Adam and Beyond`_
            (default: False)
        {_foreach_doc}
        {_maximize_doc}
        {_capturable_doc}
        {_differentiable_doc}
        {_fused_doc}
    .. _Adam\: A Method for Stochastic Optimization:
        https://arxiv.org/abs/1412.6980
    .. _On the Convergence of Adam and Beyond:
        https://openreview.net/forum?id=ryQu7f-RZ

    """


def adam(params: List[Tensor],
         grads: List[Tensor],
         exp_avgs: List[Tensor],
         exp_avg_sqs: List[Tensor],
         max_exp_avg_sqs: List[Tensor],
         state_steps: List[Tensor],
         # kwonly args with defaults are not supported by functions compiled with torchscript issue #70627
         # setting this as kwarg for now as functional API is compiled by torch/distributed/optim
         foreach: Optional[bool] = None,
         capturable: bool = False,
         differentiable: bool = False,
         fused: Optional[bool] = None,
         grad_scale: Optional[Tensor] = None,
         found_inf: Optional[Tensor] = None,
         has_complex: bool = False,
         *,
         amsgrad: bool,
         beta1: float,
         beta2: float,
         lr: Union[float, Tensor],
         weight_decay: float,
         eps: float,
         maximize: bool):
    r"""Functional API that performs Adam algorithm computation.

    See :class:`~torch.optim.Adam` for details.
    """
    # Respect when the user inputs False/True for foreach or fused. We only want to change
    # the default when neither have been user-specified. Note that we default to foreach
    # and pass False to use_fused. This is not a mistake--we want to give the fused impl
    # bake-in time before making it the default, even if it is typically faster.
    if fused is None and foreach is None:
        _, foreach = _default_to_fused_or_foreach(params, differentiable, use_fused=False)
        # Do not flip on foreach for the unsupported case where lr is a Tensor and capturable=False.
        if foreach and isinstance(lr, Tensor) and not capturable:
            foreach = False
    if fused is None:
        fused = False
    if foreach is None:
        foreach = False

    # this check is slow during compilation, so we skip it
    # if it's strictly needed we can add this check back in dynamo
    if not torch._utils.is_compiling() and not all(isinstance(t, torch.Tensor) for t in state_steps):
        raise RuntimeError("API has changed, `state_steps` argument must contain a list of singleton tensors")

    if foreach and torch.jit.is_scripting():
        raise RuntimeError('torch.jit.script not supported with foreach optimizers')
    if fused and torch.jit.is_scripting():
        raise RuntimeError("torch.jit.script not supported with fused optimizers")

    if fused and not torch.jit.is_scripting():
        func = _fused_adam
    elif foreach and not torch.jit.is_scripting():
        func = _multi_tensor_adam
    else:
        func = _single_tensor_adam

    func(params,
         grads,
         exp_avgs,
         exp_avg_sqs,
         max_exp_avg_sqs,
         state_steps,
         amsgrad=amsgrad,
         has_complex=has_complex,
         beta1=beta1,
         beta2=beta2,
         lr=lr,
         weight_decay=weight_decay,
         eps=eps,
         maximize=maximize,
         capturable=capturable,
         differentiable=differentiable,
         grad_scale=grad_scale,
         found_inf=found_inf)

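
# Usage sketch (illustrative): `adam` is the functional entry point driven by
# `Adam.step` above and takes flattened per-param state. Assuming `p` is a
# parameter with a populated `.grad`, and `m`, `v`, `step_t` are its matching
# `exp_avg`, `exp_avg_sq`, and `step` tensors (created as in `_init_group`),
# a minimal single-tensor call would look like:
#
#   adam([p], [p.grad], [m], [v], [], [step_t],
#        foreach=False, amsgrad=False, beta1=0.9, beta2=0.999,
#        lr=1e-3, weight_decay=0.0, eps=1e-8, maximize=False)
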
def _single_tensor_adam(params: List[Tensor],
                        grads: List[Tensor],
                        exp_avgs: List[Tensor],
                        exp_avg_sqs: List[Tensor],
                        max_exp_avg_sqs: List[Tensor],
                        state_steps: List[Tensor],
                        grad_scale: Optional[Tensor],
                        found_inf: Optional[Tensor],
                        *,
                        amsgrad: bool,
                        has_complex: bool,
                        beta1: float,
                        beta2: float,
                        lr: Union[float, Tensor],
                        weight_decay: float,
                        eps: float,
                        maximize: bool,
                        capturable: bool,
                        differentiable: bool):

    assert grad_scale is None and found_inf is None

    if torch.jit.is_scripting():
        # this assert is due to JIT being dumb and not realizing that the ops below
        # have overloads to handle both float and Tensor lrs, so we just assert it's
        # a float since most people using JIT are using floats
        assert isinstance(lr, float)

    for i, param in enumerate(params):
        grad = grads[i] if not maximize else -grads[i]
        exp_avg = exp_avgs[i]
        exp_avg_sq = exp_avg_sqs[i]
        step_t = state_steps[i]

        # If compiling, the compiler will handle cudagraph checks, see note [torch.compile x capturable]
        if not torch._utils.is_compiling() and capturable:
            assert (
                (param.is_cuda and step_t.is_cuda) or (param.is_xla and step_t.is_xla)
            ), "If capturable=True, params and state_steps must be CUDA or XLA tensors."

        # update step
        step_t += 1

        if weight_decay != 0:
            grad = grad.add(param, alpha=weight_decay)

        if torch.is_complex(param):
            grad = torch.view_as_real(grad)
            exp_avg = torch.view_as_real(exp_avg)
            exp_avg_sq = torch.view_as_real(exp_avg_sq)
            if amsgrad:
                max_exp_avg_sqs[i] = torch.view_as_real(max_exp_avg_sqs[i])
            param = torch.view_as_real(param)

        # Decay the first and second moment running average coefficient
        exp_avg.lerp_(grad, 1 - beta1)
        exp_avg_sq.mul_(beta2).addcmul_(grad, grad.conj(), value=1 - beta2)

        if capturable or differentiable:
            step = step_t

            bias_correction1 = 1 - beta1 ** step
            bias_correction2 = 1 - beta2 ** step

            step_size = lr / bias_correction1
            step_size_neg = step_size.neg()

            bias_correction2_sqrt = bias_correction2.sqrt()

            if amsgrad:
                # Maintains the maximum of all 2nd moment running avg. till now
                if differentiable:
                    max_exp_avg_sq = max_exp_avg_sqs[i].clone()
                else:
                    max_exp_avg_sq = max_exp_avg_sqs[i]

                max_exp_avg_sqs[i].copy_(torch.maximum(max_exp_avg_sq, exp_avg_sq))

                # Uses the max. for normalizing running avg. of gradient
                # Folds in (admittedly ugly) 1-elem step_size math here to avoid extra param-set-sized read+write
                # (can't fold it into addcdiv_ below because addcdiv_ requires value is a Number, not a Tensor)
                denom = (max_exp_avg_sqs[i].sqrt() / (bias_correction2_sqrt * step_size_neg)).add_(eps / step_size_neg)
            else:
                denom = (exp_avg_sq.sqrt() / (bias_correction2_sqrt * step_size_neg)).add_(eps / step_size_neg)

            param.addcdiv_(exp_avg, denom)
        else:
            step = _get_value(step_t)

            bias_correction1 = 1 - beta1 ** step
            bias_correction2 = 1 - beta2 ** step

            step_size = lr / bias_correction1

            bias_correction2_sqrt = _dispatch_sqrt(bias_correction2)

            if amsgrad:
                # Maintains the maximum of all 2nd moment running avg. till now
                torch.maximum(max_exp_avg_sqs[i], exp_avg_sq, out=max_exp_avg_sqs[i])

                # Use the max. for normalizing running avg. of gradient
                denom = (max_exp_avg_sqs[i].sqrt() / bias_correction2_sqrt).add_(eps)
            else:
                denom = (exp_avg_sq.sqrt() / bias_correction2_sqrt).add_(eps)

            param.addcdiv_(exp_avg, denom, value=-step_size)

        # Lastly, switch back to complex view
        if amsgrad and torch.is_complex(params[i]):
            max_exp_avg_sqs[i] = torch.view_as_complex(max_exp_avg_sqs[i])


def _multi_tensor_adam(params: List[Tensor],
                       grads: List[Tensor],
                       exp_avgs: List[Tensor],
                       exp_avg_sqs: List[Tensor],
                       max_exp_avg_sqs: List[Tensor],
                       state_steps: List[Tensor],
                       grad_scale: Optional[Tensor],
                       found_inf: Optional[Tensor],
                       *,
                       amsgrad: bool,
                       has_complex: bool,
                       beta1: float,
                       beta2: float,
                       lr: Union[float, Tensor],
                       weight_decay: float,
                       eps: float,
                       maximize: bool,
                       capturable: bool,
                       differentiable: bool):
    if len(params) == 0:
        return

    if isinstance(lr, Tensor) and not capturable:
        raise RuntimeError("lr as a Tensor is not supported for capturable=False and foreach=True")

    # If compiling, the compiler will handle cudagraph checks, see note [torch.compile x capturable]
    if not torch._utils.is_compiling() and capturable:
        assert all(p.is_cuda and step.is_cuda for p, step in zip(params, state_steps)), \
            "If capturable=True, params and state_steps must be CUDA tensors."

    assert grad_scale is None and found_inf is None

    assert not differentiable, "_foreach ops don't support autograd"

    grouped_tensors = Optimizer._group_tensors_by_device_and_dtype(
        [params, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps])
    for ((
        device_params,
        device_grads,
        device_exp_avgs,
        device_exp_avg_sqs,
        device_max_exp_avg_sqs,
        device_state_steps,
    ), _) in grouped_tensors.values():

        # Handle complex parameters
        if has_complex:
            if amsgrad:
                _view_as_real(device_params, device_grads, device_exp_avgs, device_exp_avg_sqs, device_max_exp_avg_sqs)
            else:
                _view_as_real(device_params, device_grads, device_exp_avgs, device_exp_avg_sqs)

        if maximize:
            device_grads = torch._foreach_neg(device_grads)

        # Update steps
        # If steps are on CPU, foreach will fall back to the slow path, which is a for-loop calling t.add(1) over
        # and over. 1 will then be wrapped into a Tensor over and over again, which is slower than if we just
        # wrapped it once now. The alpha is required to assure we go to the right overload.
        if device_state_steps[0].is_cpu:
            torch._foreach_add_(device_state_steps, torch.tensor(1.0, device='cpu'), alpha=1.0)
        else:
            torch._foreach_add_(device_state_steps, 1)

        if weight_decay != 0:
            # Re-use the intermediate memory (device_grads) already allocated for maximize
            if maximize:
                torch._foreach_add_(device_grads, device_params, alpha=weight_decay)
            else:
                device_grads = torch._foreach_add(device_grads, device_params, alpha=weight_decay)

        # Decay the first and second moment running average coefficient
        torch._foreach_lerp_(device_exp_avgs, device_grads, 1 - beta1)

        torch._foreach_mul_(device_exp_avg_sqs, beta2)
        torch._foreach_addcmul_(device_exp_avg_sqs, device_grads, device_grads, 1 - beta2)

        # Delete the local intermediate since it won't be used anymore to save on peak memory
        del device_grads

        if capturable:
            bias_correction1 = torch._foreach_pow(beta1, device_state_steps)
            bias_correction2 = torch._foreach_pow(beta2, device_state_steps)
            # foreach_sub doesn't allow a scalar as the first arg
            torch._foreach_sub_(bias_correction1, 1)
            torch._foreach_sub_(bias_correction2, 1)
            # we do not negate bias_correction1 as it'll need to be negated later anyway
            torch._foreach_neg_(bias_correction2)

            # foreach_div doesn't allow a scalar as the first arg
            torch._foreach_div_(bias_correction1, lr)
            torch._foreach_reciprocal_(bias_correction1)

            torch._foreach_sqrt_(bias_correction2)

            # Re-assign for clarity as we maintain minimal intermediates: we'll have
            # step_size = - lr / (1 - beta1 ^ t) where t = num_steps
            # bias_correction2_sqrt = sqrt(1 - beta2 ^ t)
            step_size = bias_correction1
            bias_correction2_sqrt = bias_correction2

            if amsgrad:
                # Maintains the maximum of all 2nd moment running avg. till now
                torch._foreach_maximum_(device_max_exp_avg_sqs, device_exp_avg_sqs)  # type: ignore[assignment]

                # Set intermediate to the max. for normalizing running avg. of gradient when amsgrad
                exp_avg_sq_sqrt = torch._foreach_sqrt(device_max_exp_avg_sqs)
            else:
                exp_avg_sq_sqrt = torch._foreach_sqrt(device_exp_avg_sqs)

            torch._foreach_div_(exp_avg_sq_sqrt, bias_correction2_sqrt)
            torch._foreach_add_(exp_avg_sq_sqrt, eps)
            torch._foreach_div_(exp_avg_sq_sqrt, step_size)

            # at this point, exp_avg_sq_sqrt = - (1 - beta1^t) * [sqrt(exp_avg_sq / (1 - beta2^t)) + eps] / lr
            torch._foreach_addcdiv_(device_params, device_exp_avgs, exp_avg_sq_sqrt)
        else:
            bias_correction1 = [1 - beta1 ** _get_value(step) for step in device_state_steps]
            bias_correction2 = [1 - beta2 ** _get_value(step) for step in device_state_steps]

            step_size = _stack_if_compiling([(lr / bc) * -1 for bc in bias_correction1])

            bias_correction2_sqrt = [_dispatch_sqrt(bc) for bc in bias_correction2]

            if amsgrad:
                # Maintains the maximum of all 2nd moment running avg. till now
                torch._foreach_maximum_(device_max_exp_avg_sqs, device_exp_avg_sqs)

                # Use the max. for normalizing running avg. of gradient
                exp_avg_sq_sqrt = torch._foreach_sqrt(device_max_exp_avg_sqs)
            else:
                exp_avg_sq_sqrt = torch._foreach_sqrt(device_exp_avg_sqs)

            torch._foreach_div_(exp_avg_sq_sqrt, bias_correction2_sqrt)
            torch._foreach_add_(exp_avg_sq_sqrt, eps)
            torch._foreach_addcdiv_(device_params, device_exp_avgs, exp_avg_sq_sqrt, step_size)


def _fused_adam(
    params: List[Tensor],
    grads: List[Tensor],
    exp_avgs: List[Tensor],
    exp_avg_sqs: List[Tensor],
    max_exp_avg_sqs: List[Tensor],
    state_steps: List[Tensor],
    grad_scale: Optional[Tensor],
    found_inf: Optional[Tensor],
    *,
    amsgrad: bool,
    has_complex: bool,  # Needed for consistency.
    beta1: float,
    beta2: float,
    lr: Union[float, Tensor],
    weight_decay: float,
    eps: float,
    maximize: bool,
    capturable: bool,  # Needed for consistency.
    differentiable: bool,
) -> None:
    if not params:
        return
    if differentiable:
        raise RuntimeError("Adam with fused=True does not support differentiable=True")

    grad_scale_dict = {grad_scale.device: grad_scale} if grad_scale is not None else None
    found_inf_dict = {found_inf.device: found_inf} if found_inf is not None else None

    # We only shuffle around the lr when it is a Tensor and on CUDA, otherwise, we prefer
    # treating it as a scalar.
    lr_dict = {lr.device: lr} if isinstance(lr, Tensor) and str(lr.device) != "cpu" else None

    grouped_tensors = Optimizer._group_tensors_by_device_and_dtype(
        [params, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps])
    for (device, _), ((device_params,
                       device_grads,
                       device_exp_avgs,
                       device_exp_avg_sqs,
                       device_max_exp_avg_sqs,
                       device_state_steps,), _) in grouped_tensors.items():
        device_grad_scale, device_found_inf = None, None
        if grad_scale is not None:
            if device not in grad_scale_dict:
                grad_scale_dict[device] = grad_scale.to(device, non_blocking=True)
            device_grad_scale = grad_scale_dict[device]
        if found_inf is not None:
            if device not in found_inf_dict:
                found_inf_dict[device] = found_inf.to(device, non_blocking=True)
            device_found_inf = found_inf_dict[device]
        if lr_dict is not None and device not in lr_dict:
            lr_dict[device] = lr.to(device=device, non_blocking=True)
            lr = lr_dict[device]
        torch._foreach_add_(device_state_steps, 1)
        torch._fused_adam_(
            device_params,
            device_grads,
            device_exp_avgs,
            device_exp_avg_sqs,
            device_max_exp_avg_sqs,
            device_state_steps,
            amsgrad=amsgrad,
            lr=lr,
            beta1=beta1,
            beta2=beta2,
            weight_decay=weight_decay,
            eps=eps,
            maximize=maximize,
            grad_scale=device_grad_scale,
            found_inf=device_found_inf,
        )
        if device_found_inf is not None:
            torch._foreach_sub_(device_state_steps, [device_found_inf] * len(device_state_steps))
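
As a quick sanity check of the capturable path exercised above, here is a minimal usage sketch (illustrative; assumes a CUDA device is available). It mirrors the `Tensor lr with capturable and amsgrad` config from the PR's test logs: with `capturable=True`, both the `step` state and a Tensor `lr` are hosted on the GPU.

```python
import torch

param = torch.randn(4, device="cuda", requires_grad=True)
opt = torch.optim.Adam(
    [param],
    lr=torch.tensor(1e-3, device="cuda"),  # Tensor lr needs capturable=True (or fused=True)
    amsgrad=True,
    capturable=True,
)
param.grad = torch.randn_like(param)
opt.step()
# With capturable=True, `step` is created on the param's device, not on CPU.
assert opt.state[param]["step"].device.type == "cuda"
```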