[BE] Enable Ruff's Flake8 PYI042 (#111114)
Enable [snake-case-type-alias (PYI042)](https://docs.astral.sh/ruff/rules/snake-case-type-alias/).

Link: #110950
Pull Request resolved: https://github.com/pytorch/pytorch/pull/111114
Approved by: https://github.com/albanD
Committed by: PyTorch MergeBot
Parent: 5db9f911ac
Commit: b460c30893
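For context, a minimal sketch of the naming convention PYI042 enforces, using the alias renamed by this PR. The before/after lines mirror the `.optimizer` module hunk below; the standalone snippet itself is illustrative and not part of the diff.

# Illustrative only: PYI042 flags snake_case type aliases; CamelCase is preferred.
from typing import Any, Dict, Iterable, Union

import torch
from typing_extensions import TypeAlias

# Flagged by PYI042 (snake_case alias name):
#   params_t: TypeAlias = Union[Iterable[torch.Tensor], Iterable[Dict[str, Any]]]

# Compliant spelling, as introduced by this PR:
ParamsT: TypeAlias = Union[Iterable[torch.Tensor], Iterable[Dict[str, Any]]]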
@@ -52,7 +52,6 @@ ignore = [
     "PYI034",
     "PYI036",
     "PYI041",
-    "PYI042",
     "PYI045",
     "PYI056",
     "SIM102", "SIM103", "SIM112", # flake8-simplify code styles

@@ -1,9 +1,9 @@
-from .optimizer import Optimizer, params_t
+from .optimizer import Optimizer, ParamsT

 class Adadelta(Optimizer):
     def __init__(
         self,
-        params: params_t,
+        params: ParamsT,
         lr: float = ...,
         rho: float = ...,
         eps: float = ...,

@@ -1,9 +1,9 @@
-from .optimizer import Optimizer, params_t
+from .optimizer import Optimizer, ParamsT

 class Adagrad(Optimizer):
     def __init__(
         self,
-        params: params_t,
+        params: ParamsT,
         lr: float = ...,
         lr_decay: float = ...,
         weight_decay: float = ...,

@@ -2,7 +2,7 @@ from typing import List, Optional, Union, Tuple

 import torch
 from torch import Tensor
-from .optimizer import (Optimizer, params_t, _use_grad_for_differentiable, _get_value,
+from .optimizer import (Optimizer, ParamsT, _use_grad_for_differentiable, _get_value,
                         _stack_if_compiling, _dispatch_sqrt, _default_to_fused_or_foreach,
                         _capturable_doc, _differentiable_doc, _foreach_doc, _fused_doc,
                         _maximize_doc)
@@ -13,7 +13,7 @@ __all__ = ['Adam', 'adam']

 class Adam(Optimizer):
     def __init__(self,
-                 params: params_t,
+                 params: ParamsT,
                  lr: Union[float, Tensor] = 1e-3,
                  betas: Tuple[float, float] = (0.9, 0.999),
                  eps: float = 1e-8,

@@ -2,12 +2,12 @@ from typing import Optional, Tuple, Union

 from torch import Tensor

-from .optimizer import Optimizer, params_t
+from .optimizer import Optimizer, ParamsT

 class Adam(Optimizer):
     def __init__(
         self,
-        params: params_t,
+        params: ParamsT,
         lr: Union[float, Tensor] = 1e-3,
         betas: Tuple[float, float] = (0.9, 0.999),
         eps: float = 1e-8,

@@ -1,11 +1,11 @@
 from typing import Tuple

-from .optimizer import Optimizer, params_t
+from .optimizer import Optimizer, ParamsT

 class Adamax(Optimizer):
     def __init__(
         self,
-        params: params_t,
+        params: ParamsT,
         lr: float = ...,
         betas: Tuple[float, float] = ...,
         eps: float = ...,

@@ -2,7 +2,7 @@ import torch
 from torch import Tensor
 from .optimizer import (Optimizer, _use_grad_for_differentiable, _get_value, _dispatch_sqrt,
                         _stack_if_compiling, _capturable_doc, _differentiable_doc, _foreach_doc,
-                        _fused_doc, _maximize_doc, _default_to_fused_or_foreach, params_t)
+                        _fused_doc, _maximize_doc, _default_to_fused_or_foreach, ParamsT)
 from typing import List, Optional, Tuple, Union
 from torch.utils._foreach_utils import _get_fused_kernels_supported_devices

@@ -12,7 +12,7 @@ __all__ = ["AdamW", "adamw"]
 class AdamW(Optimizer):
     def __init__(
         self,
-        params: params_t,
+        params: ParamsT,
         lr: Union[float, Tensor] = 1e-3,
         betas: Tuple[float, float] = (0.9, 0.999),
         eps: float = 1e-8,

@@ -2,12 +2,12 @@ from typing import Optional, Tuple, Union

 from torch import Tensor

-from .optimizer import Optimizer, params_t
+from .optimizer import Optimizer, ParamsT

 class AdamW(Optimizer):
     def __init__(
         self,
-        params: params_t,
+        params: ParamsT,
         lr: Union[float, Tensor] = 1e-3,
         betas: Tuple[float, float] = (0.9, 0.999),
         eps: float = 1e-8,

@@ -1,9 +1,9 @@
-from .optimizer import Optimizer, params_t
+from .optimizer import Optimizer, ParamsT

 class ASGD(Optimizer):
     def __init__(
         self,
-        params: params_t,
+        params: ParamsT,
         lr: float = ...,
         lambd: float = ...,
         alpha: float = ...,

@@ -1,11 +1,11 @@
 from typing import Optional

-from .optimizer import Optimizer, params_t
+from .optimizer import Optimizer, ParamsT

 class LBFGS(Optimizer):
     def __init__(
         self,
-        params: params_t,
+        params: ParamsT,
         lr: float = ...,
         max_iter: int = ...,
         max_eval: Optional[int] = ...,

@@ -1,11 +1,11 @@
 from typing import Tuple

-from .optimizer import Optimizer, params_t
+from .optimizer import Optimizer, ParamsT

 class NAdam(Optimizer):
     def __init__(
         self,
-        params: params_t,
+        params: ParamsT,
         lr: float = ...,
         betas: Tuple[float, float] = ...,
         eps: float = ...,

@@ -204,7 +204,7 @@ def register_optimizer_step_post_hook(hook: GlobalOptimizerPostHook) -> Removabl
     _global_optimizer_post_hooks[handle.id] = hook
     return handle

-params_t: TypeAlias = Union[Iterable[torch.Tensor], Iterable[Dict[str, Any]]]
+ParamsT: TypeAlias = Union[Iterable[torch.Tensor], Iterable[Dict[str, Any]]]

 _P = ParamSpec("_P")
 R = TypeVar("R")
@@ -236,7 +236,7 @@ class Optimizer:
     _optimizer_load_state_dict_pre_hooks: 'OrderedDict[int, Callable[["Optimizer", StateDict], Optional[StateDict]]]'
     _optimizer_load_state_dict_post_hooks: 'OrderedDict[int, Callable[["Optimizer"], None]]'

-    def __init__(self, params: params_t, defaults: Dict[str, Any]) -> None:
+    def __init__(self, params: ParamsT, defaults: Dict[str, Any]) -> None:
         torch._C._log_api_usage_once("python.optimizer")
         self.defaults = defaults
         self._optimizer_step_pre_hooks = OrderedDict()

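As a usage sketch, the renamed alias annotates the params argument accepted by Optimizer subclasses. The subclass below is hypothetical and for illustration only; it assumes the alias is importable as torch.optim.optimizer.ParamsT after this PR.

# Hypothetical minimal optimizer, shown only to illustrate the ParamsT annotation.
from typing import Any, Dict

import torch
from torch.optim.optimizer import Optimizer, ParamsT  # ParamsT after this PR's rename


class PlainSGD(Optimizer):
    def __init__(self, params: ParamsT, lr: float = 1e-2) -> None:
        defaults: Dict[str, Any] = dict(lr=lr)
        super().__init__(params, defaults)

    @torch.no_grad()
    def step(self, closure=None):
        # Plain gradient descent: p <- p - lr * grad
        for group in self.param_groups:
            for p in group["params"]:
                if p.grad is not None:
                    p.add_(p.grad, alpha=-group["lr"])

Both an Iterable[Tensor] and an Iterable[Dict[str, Any]] of per-parameter-group options satisfy the annotation, matching the TypeAlias definition in the hunk above.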
@@ -1,11 +1,11 @@
 from typing import Tuple

-from .optimizer import Optimizer, params_t
+from .optimizer import Optimizer, ParamsT

 class RAdam(Optimizer):
     def __init__(
         self,
-        params: params_t,
+        params: ParamsT,
         lr: float = ...,
         betas: Tuple[float, float] = ...,
         eps: float = ...,

@@ -1,9 +1,9 @@
-from .optimizer import Optimizer, params_t
+from .optimizer import Optimizer, ParamsT

 class RMSprop(Optimizer):
     def __init__(
         self,
-        params: params_t,
+        params: ParamsT,
         lr: float = ...,
         alpha: float = ...,
         eps: float = ...,

@@ -1,11 +1,11 @@
 from typing import Tuple

-from .optimizer import Optimizer, params_t
+from .optimizer import Optimizer, ParamsT

 class Rprop(Optimizer):
     def __init__(
         self,
-        params: params_t,
+        params: ParamsT,
         lr: float = ...,
         etas: Tuple[float, float] = ...,
         step_sizes: Tuple[float, float] = ...,

@@ -1,9 +1,9 @@
-from .optimizer import Optimizer, params_t
+from .optimizer import Optimizer, ParamsT

 class SGD(Optimizer):
     def __init__(
         self,
-        params: params_t,
+        params: ParamsT,
         lr: float,
         momentum: float = ...,
         dampening: float = ...,

@@ -1,11 +1,11 @@
 from typing import Tuple

-from .optimizer import Optimizer, params_t
+from .optimizer import Optimizer, ParamsT

 class SparseAdam(Optimizer):
     def __init__(
         self,
-        params: params_t,
+        params: ParamsT,
         lr: float = ...,
         betas: Tuple[float, float] = ...,
         eps: float = ...,