Mirror of https://github.com/pytorch/pytorch.git, synced 2025-10-21 13:44:15 +08:00
This PR fixes several bugs, listed in priority order:

1. `load_state_dict` with a non-tensor step was incorrect for the capturable and fused implementations because `__setstate__` did not create the step tensors on the right device. This has been fixed. (A minimal repro sketch appears after the test output below.)
2. The most recently added capturable implementations were missing the eager-mode check that all tensors must be on CUDA. Those checks have now been added.
3. The most recent change in Adamax only adds capturable for foreach and would be silently incorrect for the forloop/single-tensor path. I've added erroring for that case and modified testing with many skips. Honestly, this PR has only further cemented my preference that we implement the single-tensor and multi-tensor capturable paths together in the future. @mlazos
4. The conditional for adding CUDA-supported configs to the optimizer infos was incorrect, so we hadn't been testing capturable at all! This is now rectified and was the trigger for this PR in the first place.
5. Similarly, the conditional in `_get_optim_inputs_including_global_cliquey_kwargs` was sometimes incorrect as well. This has also been corrected.

The following is not a bug, but simplifies life by removing the need to handle Nones: `optim_input_funcs` must now mandatorily take in a `device`, which can be a string or a torch.device.

Details for posterity:

4. Running the test_foreach_matches_forloop test and printing the configs that get run now yields capturable being included, which is correct.

```
(pytorch-3.10) [janeyx@devgpu023.odn1 ~/local/pytorch (5d50138f)]$ python test/test_optim.py -k test_foreach_matches_forloop_AdamW_cuda
/home/janeyx/.conda/envs/pytorch-3.10/lib/python3.10/site-packages/transformers/utils/generic.py:441: UserWarning: torch.utils._pytree._register_pytree_node is deprecated. Please use torch.utils._pytree.register_pytree_node instead.
  _torch_pytree._register_pytree_node(
/home/janeyx/.conda/envs/pytorch-3.10/lib/python3.10/site-packages/scipy/__init__.py:146: UserWarning: A NumPy version >=1.17.3 and <1.25.0 is required for this version of SciPy (detected version 1.26.0
  warnings.warn(f"A NumPy version >={np_minversion} and <{np_maxversion}"
params=None, kwargs={}, desc=default
params=None, kwargs={'lr': 0.01}, desc=non-default lr
params=None, kwargs={'weight_decay': 0.1}, desc=nonzero weight_decay
params=None, kwargs={'weight_decay': 0.1, 'maximize': True}, desc=maximize
params=None, kwargs={'weight_decay': 0.1, 'amsgrad': True}, desc=amsgrad
params=None, kwargs={'capturable': True}, desc=capturable
params=None, kwargs={'weight_decay': 0.1, 'amsgrad': True, 'capturable': True}, desc=capturable, amsgrad
params=None, kwargs={'lr': tensor(0.0010), 'amsgrad': True, 'capturable': True}, desc=Tensor lr with capturable and amsgrad
.
----------------------------------------------------------------------
Ran 1 test in 19.229s

OK
```

5. Running the test_optimizer_can_be_printed test (which calls `_get_optim_inputs_including_global_cliquey_kwargs`) and printing what gets run is also now correct.
```
/home/janeyx/.conda/envs/pytorch-3.10/lib/python3.10/site-packages/scipy/__init__.py:146: UserWarning: A NumPy version >=1.17.3 and <1.25.0 is required for this version of SciPy (detected version 1.26.0
  warnings.warn(f"A NumPy version >={np_minversion} and <{np_maxversion}"
params=None, kwargs={'differentiable': False}, desc=default
params=None, kwargs={'differentiable': True}, desc=default & differentiable
params=None, kwargs={'lr': 0.01, 'differentiable': False}, desc=non-default lr
params=None, kwargs={'lr': 0.01, 'differentiable': True}, desc=non-default lr & differentiable
params=None, kwargs={'weight_decay': 0.1, 'differentiable': False}, desc=nonzero weight_decay
params=None, kwargs={'weight_decay': 0.1, 'differentiable': True}, desc=nonzero weight_decay & differentiable
params=None, kwargs={'weight_decay': 0.1, 'maximize': True, 'differentiable': False}, desc=maximize
params=None, kwargs={'weight_decay': 0.1, 'maximize': True, 'differentiable': True}, desc=maximize & differentiable
params=None, kwargs={'weight_decay': 0.1, 'amsgrad': True, 'differentiable': False}, desc=amsgrad
params=None, kwargs={'weight_decay': 0.1, 'amsgrad': True, 'differentiable': True}, desc=amsgrad & differentiable
.params=None, kwargs={'foreach': False, 'differentiable': False, 'fused': False}, desc=default
params=None, kwargs={'foreach': True, 'differentiable': False, 'fused': False}, desc=default & foreach
params=None, kwargs={'foreach': False, 'differentiable': True, 'fused': False}, desc=default & differentiable
params=None, kwargs={'foreach': False, 'differentiable': False, 'fused': True}, desc=default & fused
params=None, kwargs={'lr': 0.01, 'foreach': False, 'differentiable': False, 'fused': False}, desc=non-default lr
params=None, kwargs={'lr': 0.01, 'foreach': True, 'differentiable': False, 'fused': False}, desc=non-default lr & foreach
params=None, kwargs={'lr': 0.01, 'foreach': False, 'differentiable': True, 'fused': False}, desc=non-default lr & differentiable
params=None, kwargs={'lr': 0.01, 'foreach': False, 'differentiable': False, 'fused': True}, desc=non-default lr & fused
params=None, kwargs={'weight_decay': 0.1, 'foreach': False, 'differentiable': False, 'fused': False}, desc=nonzero weight_decay
params=None, kwargs={'weight_decay': 0.1, 'foreach': True, 'differentiable': False, 'fused': False}, desc=nonzero weight_decay & foreach
params=None, kwargs={'weight_decay': 0.1, 'foreach': False, 'differentiable': True, 'fused': False}, desc=nonzero weight_decay & differentiable
params=None, kwargs={'weight_decay': 0.1, 'foreach': False, 'differentiable': False, 'fused': True}, desc=nonzero weight_decay & fused
params=None, kwargs={'weight_decay': 0.1, 'maximize': True, 'foreach': False, 'differentiable': False, 'fused': False}, desc=maximize
params=None, kwargs={'weight_decay': 0.1, 'maximize': True, 'foreach': True, 'differentiable': False, 'fused': False}, desc=maximize & foreach
params=None, kwargs={'weight_decay': 0.1, 'maximize': True, 'foreach': False, 'differentiable': True, 'fused': False}, desc=maximize & differentiable
params=None, kwargs={'weight_decay': 0.1, 'maximize': True, 'foreach': False, 'differentiable': False, 'fused': True}, desc=maximize & fused
params=None, kwargs={'weight_decay': 0.1, 'amsgrad': True, 'foreach': False, 'differentiable': False, 'fused': False}, desc=amsgrad
params=None, kwargs={'weight_decay': 0.1, 'amsgrad': True, 'foreach': True, 'differentiable': False, 'fused': False}, desc=amsgrad & foreach
params=None, kwargs={'weight_decay': 0.1, 'amsgrad': True, 'foreach': False, 'differentiable': True, 'fused': False}, desc=amsgrad & differentiable
params=None, kwargs={'weight_decay': 0.1, 'amsgrad': True, 'foreach': False, 'differentiable': False, 'fused': True}, desc=amsgrad & fused
params=None, kwargs={'capturable': True, 'foreach': False, 'differentiable': False, 'fused': False}, desc=capturable
params=None, kwargs={'capturable': True, 'foreach': True, 'differentiable': False, 'fused': False}, desc=capturable & foreach
params=None, kwargs={'capturable': True, 'foreach': False, 'differentiable': True, 'fused': False}, desc=capturable & differentiable
params=None, kwargs={'capturable': True, 'foreach': False, 'differentiable': False, 'fused': True}, desc=capturable & fused
params=None, kwargs={'weight_decay': 0.1, 'amsgrad': True, 'capturable': True, 'foreach': False, 'differentiable': False, 'fused': False}, desc=capturable, amsgrad
params=None, kwargs={'weight_decay': 0.1, 'amsgrad': True, 'capturable': True, 'foreach': True, 'differentiable': False, 'fused': False}, desc=capturable, amsgrad & foreach
params=None, kwargs={'weight_decay': 0.1, 'amsgrad': True, 'capturable': True, 'foreach': False, 'differentiable': True, 'fused': False}, desc=capturable, amsgrad & differentiable
params=None, kwargs={'weight_decay': 0.1, 'amsgrad': True, 'capturable': True, 'foreach': False, 'differentiable': False, 'fused': True}, desc=capturable, amsgrad & fused
params=None, kwargs={'lr': tensor(0.0010), 'amsgrad': True, 'capturable': True, 'foreach': False, 'differentiable': False, 'fused': False}, desc=Tensor lr with capturable and amsgrad
params=None, kwargs={'lr': tensor(0.0010), 'amsgrad': True, 'capturable': True, 'foreach': True, 'differentiable': False, 'fused': False}, desc=Tensor lr with capturable and amsgrad & foreach
params=None, kwargs={'lr': tensor(0.0010), 'amsgrad': True, 'capturable': True, 'foreach': False, 'differentiable': True, 'fused': False}, desc=Tensor lr with capturable and amsgrad & differentiable
params=None, kwargs={'lr': tensor(0.0010), 'amsgrad': True, 'capturable': True, 'foreach': False, 'differentiable': False, 'fused': True}, desc=Tensor lr with capturable and amsgrad & fused
.
----------------------------------------------------------------------
Ran 2 tests in 11.112s

OK
```

Pull Request resolved: https://github.com/pytorch/pytorch/pull/118326
Approved by: https://github.com/mlazos
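To make bug 1 concrete, here is a minimal repro sketch (not part of the PR, and illustrative only) of the intended post-fix behavior: when a checkpoint stores `step` as a plain Python number, loading it into a capturable optimizer should re-create the step as a tensor on the parameter's device. The checkpoint mangling below is an assumption used to simulate an old-style state dict.

```python
import torch

# Sketch only: capturable expects CUDA tensors in eager mode, so guard on availability.
if torch.cuda.is_available():
    param = torch.randn(4, device="cuda", requires_grad=True)
    opt = torch.optim.Adamax([param], lr=2e-3, capturable=True)

    param.grad = torch.randn_like(param)
    opt.step()

    # Simulate an old-style checkpoint where `step` was saved as a plain float.
    sd = opt.state_dict()
    sd["state"][0]["step"] = float(sd["state"][0]["step"])

    fresh = torch.optim.Adamax([param], lr=2e-3, capturable=True)
    fresh.load_state_dict(sd)

    # Post-fix, __setstate__ materializes the step as a tensor on the param's device.
    step = fresh.state[param]["step"]
    assert torch.is_tensor(step) and step.device == param.device
```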
397 lines · 14 KiB · Python
import torch
from torch import Tensor

from .optimizer import (Optimizer, _use_grad_for_differentiable, _get_value, _default_to_fused_or_foreach,
                        _get_scalar_dtype, _differentiable_doc, _maximize_doc, _foreach_doc, _view_as_real,
                        _capturable_doc)
from typing import List, Optional

__all__ = ["Adamax", "adamax"]

class Adamax(Optimizer):
    def __init__(
        self,
        params,
        lr=2e-3,
        betas=(0.9, 0.999),
        eps=1e-8,
        weight_decay=0,
        foreach: Optional[bool] = None,
        *,
        maximize: bool = False,
        differentiable: bool = False,
        capturable: bool = False,
    ):
        if not 0.0 <= lr:
            raise ValueError(f"Invalid learning rate: {lr}")
        if not 0.0 <= eps:
            raise ValueError(f"Invalid epsilon value: {eps}")
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError(f"Invalid beta parameter at index 0: {betas[0]}")
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError(f"Invalid beta parameter at index 1: {betas[1]}")
        if not 0.0 <= weight_decay:
            raise ValueError(f"Invalid weight_decay value: {weight_decay}")

        if foreach is False and capturable:
            raise ValueError("Capturable not supported with single tensor Adamax")

        defaults = dict(
            lr=lr,
            betas=betas,
            eps=eps,
            weight_decay=weight_decay,
            foreach=foreach,
            maximize=maximize,
            differentiable=differentiable,
            capturable=capturable,
        )
        super().__init__(params, defaults)

    def __setstate__(self, state):
        super().__setstate__(state)
        for group in self.param_groups:
            group.setdefault("foreach", None)
            group.setdefault("maximize", False)
            group.setdefault("differentiable", False)
            group.setdefault("capturable", False)
            for p in group["params"]:
                p_state = self.state.get(p, [])
                if len(p_state) != 0 and not torch.is_tensor(p_state['step']):
                    step_val = float(p_state["step"])
                    p_state["step"] = (torch.tensor(step_val, dtype=_get_scalar_dtype(), device=p.device) if group['capturable']
                                       else torch.tensor(step_val, dtype=_get_scalar_dtype()))

    def _init_group(self, group, params_with_grad, grads, exp_avgs, exp_infs, state_steps):
        has_complex = False
        for p in group["params"]:
            if p.grad is None:
                continue
            has_complex |= torch.is_complex(p)
            params_with_grad.append(p)
            if p.grad.is_sparse:
                raise RuntimeError("Adamax does not support sparse gradients")
            grads.append(p.grad)

            state = self.state[p]

            # State initialization
            if len(state) == 0:
                state['step'] = (torch.zeros((), dtype=_get_scalar_dtype(), device=p.device)
                                 if group['capturable'] else torch.tensor(0.0, dtype=_get_scalar_dtype()))
                state["exp_avg"] = torch.zeros_like(
                    p, memory_format=torch.preserve_format
                )
                state["exp_inf"] = torch.zeros_like(
                    p, memory_format=torch.preserve_format
                )

            exp_avgs.append(state["exp_avg"])
            exp_infs.append(state["exp_inf"])
            state_steps.append(state["step"])

        return has_complex

    @_use_grad_for_differentiable
    def step(self, closure=None):
        """Performs a single optimization step.

        Args:
            closure (Callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()

        for group in self.param_groups:
            params_with_grad = []
            grads = []
            exp_avgs = []
            exp_infs = []
            state_steps = []

            beta1, beta2 = group["betas"]
            eps = group["eps"]
            lr = group["lr"]
            weight_decay = group["weight_decay"]
            foreach = group["foreach"]
            maximize = group["maximize"]
            differentiable = group["differentiable"]
            capturable = group["capturable"]

            has_complex = self._init_group(group, params_with_grad, grads, exp_avgs, exp_infs, state_steps)

            adamax(
                params_with_grad,
                grads,
                exp_avgs,
                exp_infs,
                state_steps,
                eps=eps,
                beta1=beta1,
                beta2=beta2,
                lr=lr,
                weight_decay=weight_decay,
                foreach=foreach,
                maximize=maximize,
                differentiable=differentiable,
                capturable=capturable,
                has_complex=has_complex,
            )

        return loss


Adamax.__doc__ = r"""Implements Adamax algorithm (a variant of Adam based on infinity norm).

    .. math::
       \begin{aligned}
            &\rule{110mm}{0.4pt}                                                                 \\
            &\textbf{input}      : \gamma \text{ (lr)}, \beta_1, \beta_2
                \text{ (betas)},\theta_0 \text{ (params)},f(\theta) \text{ (objective)},
                \: \lambda \text{ (weight decay)},                                               \\
            &\hspace{13mm}    \epsilon \text{ (epsilon)}                                         \\
            &\textbf{initialize} :  m_0 \leftarrow 0 \text{ ( first moment)},
                u_0 \leftarrow 0 \text{ ( infinity norm)}                                 \\[-1.ex]
            &\rule{110mm}{0.4pt}                                                                 \\
            &\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do}                         \\
            &\hspace{5mm}g_t           \leftarrow   \nabla_{\theta} f_t (\theta_{t-1})           \\
            &\hspace{5mm}if \: \lambda \neq 0                                                    \\
            &\hspace{10mm} g_t \leftarrow g_t + \lambda  \theta_{t-1}                            \\
            &\hspace{5mm}m_t      \leftarrow   \beta_1 m_{t-1} + (1 - \beta_1) g_t               \\
            &\hspace{5mm}u_t      \leftarrow   \mathrm{max}(\beta_2 u_{t-1}, |g_{t}|+\epsilon)   \\
            &\hspace{5mm}\theta_t \leftarrow \theta_{t-1} - \frac{\gamma m_t}{(1-\beta^t_1) u_t} \\
            &\rule{110mm}{0.4pt}                                                          \\[-1.ex]
            &\bf{return} \:  \theta_t                                                     \\[-1.ex]
            &\rule{110mm}{0.4pt}                                                          \\[-1.ex]
       \end{aligned}

    For further details regarding the algorithm we refer to `Adam: A Method for Stochastic Optimization`_.
    """ + fr"""
    Args:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, optional): learning rate (default: 2e-3)
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of gradient and its square
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-8)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        {_foreach_doc}
        {_maximize_doc}
        {_differentiable_doc}
        {_capturable_doc}

    .. _Adam\: A Method for Stochastic Optimization:
        https://arxiv.org/abs/1412.6980

    """

def adamax(
    params: List[Tensor],
    grads: List[Tensor],
    exp_avgs: List[Tensor],
    exp_infs: List[Tensor],
    state_steps: List[Tensor],
    # kwonly args with defaults are not supported by functions compiled with torchscript issue #70627
    # setting this as kwarg for now as functional API is compiled by torch/distributed/optim
    foreach: Optional[bool] = None,
    maximize: bool = False,
    differentiable: bool = False,
    capturable: bool = False,
    has_complex: bool = False,
    *,
    eps: float,
    beta1: float,
    beta2: float,
    lr: float,
    weight_decay: float,
):
    r"""Functional API that performs adamax algorithm computation.

    See :class:`~torch.optim.Adamax` for details.
    """

    if not all(isinstance(t, torch.Tensor) for t in state_steps):
        raise RuntimeError(
            "API has changed, `state_steps` argument must contain a list of singleton tensors"
        )

    if foreach is None:
        _, foreach = _default_to_fused_or_foreach(params, differentiable, use_fused=False)

    if foreach and torch.jit.is_scripting():
        raise RuntimeError("torch.jit.script not supported with foreach optimizers")

    if foreach and not torch.jit.is_scripting():
        func = _multi_tensor_adamax
    else:
        func = _single_tensor_adamax

    func(
        params,
        grads,
        exp_avgs,
        exp_infs,
        state_steps,
        eps=eps,
        beta1=beta1,
        beta2=beta2,
        lr=lr,
        weight_decay=weight_decay,
        maximize=maximize,
        differentiable=differentiable,
        has_complex=has_complex,
        capturable=capturable,
    )


def _single_tensor_adamax(
    params: List[Tensor],
    grads: List[Tensor],
    exp_avgs: List[Tensor],
    exp_infs: List[Tensor],
    state_steps: List[Tensor],
    *,
    eps: float,
    beta1: float,
    beta2: float,
    lr: float,
    weight_decay: float,
    maximize: bool,
    differentiable: bool,
    capturable: bool,
    has_complex: bool,
):
    if capturable:
        raise RuntimeError("capturable is not supported for single tensor Adamax (when foreach=False)")

    for i, param in enumerate(params):
        grad = grads[i]
        grad = grad if not maximize else -grad
        exp_avg = exp_avgs[i]
        exp_inf = exp_infs[i]
        step_t = state_steps[i]

        # update step
        step_t += 1

        if weight_decay != 0:
            grad = grad.add(param, alpha=weight_decay)

        if torch.is_complex(param):
            param = torch.view_as_real(param)
            grad = torch.view_as_real(grad)
            exp_avg = torch.view_as_real(exp_avg)
            exp_inf = torch.view_as_real(exp_inf)

        # Update biased first moment estimate.
        exp_avg.lerp_(grad, 1 - beta1)
        # Update the exponentially weighted infinity norm.
        if not differentiable:
            torch.maximum(
                exp_inf.mul_(beta2),
                grad.abs().add_(eps),
                out=exp_inf,
            )
        else:
            norm_buf = torch.cat(
                [exp_inf.mul_(beta2).unsqueeze(0), grad.abs().add_(eps).unsqueeze_(0)], 0
            )
            exp_inf.copy_(torch.amax(norm_buf, 0, keepdim=False))

        bias_correction = 1 - beta1 ** _get_value(step_t)
        clr = lr / bias_correction

        param.addcdiv_(exp_avg, exp_inf, value=-clr)


def _multi_tensor_adamax(
    params: List[Tensor],
    grads: List[Tensor],
    exp_avgs: List[Tensor],
    exp_infs: List[Tensor],
    state_steps: List[Tensor],
    *,
    beta1: float,
    beta2: float,
    lr: float,
    weight_decay: float,
    eps: float,
    maximize: bool,
    differentiable: bool,
    capturable: bool,
    has_complex: bool,
):

    assert not differentiable, "_foreach ops don't support autograd"

    if len(params) == 0:
        return

    # If compiling, the compiler will handle cudagraph checks, see note [torch.compile x capturable]
    if (not torch._utils.is_compiling() and capturable
            and not all(p.is_cuda and step.is_cuda for p, step in zip(params, state_steps))):
        raise RuntimeError("If capturable=True, params and state_steps must be CUDA tensors.")

    grouped_tensors = Optimizer._group_tensors_by_device_and_dtype([params, grads, exp_avgs, exp_infs, state_steps])
    for ((grouped_params, grouped_grads, grouped_exp_avgs, grouped_exp_infs, grouped_state_steps), _) in grouped_tensors.values():
        if has_complex:
            _view_as_real(grouped_params, grouped_grads, grouped_exp_avgs, grouped_exp_infs)

        if maximize:
            grouped_grads = torch._foreach_neg(grouped_grads)

        # Update steps
        # If steps are on CPU, foreach will fall back to the slow path, which is a for-loop calling t.add(1) over
        # and over. 1 will then be wrapped into a Tensor over and over again, which is slower than if we just
        # wrapped it once now. The alpha is required to assure we go to the right overload.
        if grouped_state_steps[0].is_cpu:
            torch._foreach_add_(grouped_state_steps, torch.tensor(1.0, device='cpu'), alpha=1.0)
        else:
            torch._foreach_add_(grouped_state_steps, 1)

        if weight_decay != 0:
            if maximize:
                # Re-use the intermediate memory (grouped_grads) already allocated for maximize
                torch._foreach_add_(grouped_grads, grouped_params, alpha=weight_decay)
            else:
                grouped_grads = torch._foreach_add(grouped_grads, grouped_params, alpha=weight_decay)

        # Update biased first moment estimate.
        torch._foreach_lerp_(grouped_exp_avgs, grouped_grads, 1 - beta1)

        # Update the exponentially weighted infinity norm.
        torch._foreach_mul_(grouped_exp_infs, beta2)

        # in this case, we need to introduce a copy of the grads
        # since one has not been introduced previously
        if not maximize and weight_decay == 0:
            grouped_grads = torch._foreach_abs(grouped_grads)
        else:
            torch._foreach_abs_(grouped_grads)

        torch._foreach_add_(grouped_grads, eps)
        torch._foreach_maximum_(grouped_exp_infs, grouped_grads)

        if capturable:
            bias_corrections = torch._foreach_pow(beta1, grouped_state_steps)
            # foreach_sub doesn't allow a scalar as the first arg
            torch._foreach_sub_(bias_corrections, 1)

            # foreach_div doesn't allow a scalar as the first arg
            torch._foreach_div_(bias_corrections, lr)
            torch._foreach_reciprocal_(bias_corrections)

            numerator = torch._foreach_mul(grouped_exp_avgs, bias_corrections)
            torch._foreach_addcdiv_(grouped_params, numerator, grouped_exp_infs)
        else:
            bias_corrections = [1 - beta1 ** _get_value(step) for step in grouped_state_steps]
            step_size = [(lr / bc) * -1 for bc in bias_corrections]
            torch._foreach_addcdiv_(grouped_params, grouped_exp_avgs, grouped_exp_infs, step_size)
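
As a closing illustration (not part of the file above), here is a minimal usage sketch of the class-based API documented in the Args section; the hyperparameter values are illustrative assumptions. Note that, per the constructor check in `Adamax.__init__`, `capturable=True` cannot be combined with `foreach=False`.

```python
import torch
from torch.optim import Adamax

# Tiny model and a single optimization step; values are illustrative only.
model = torch.nn.Linear(10, 1)
optimizer = Adamax(model.parameters(), lr=2e-3, betas=(0.9, 0.999), weight_decay=0.01)

inputs = torch.randn(8, 10)
targets = torch.randn(8, 1)

optimizer.zero_grad()
loss = torch.nn.functional.mse_loss(model(inputs), targets)
loss.backward()
optimizer.step()

# capturable requires the foreach path (and CUDA tensors in eager mode):
# Adamax(model.parameters(), capturable=True, foreach=False)  # raises ValueError
```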