Mirror of https://github.com/pytorch/pytorch.git, synced 2025-10-20 21:14:14 +08:00
[4/N] Apply py39 ruff and pyupgrade fixes (#143257)
```torch/fx/passes/annotate_getitem_nodes.py``` was changed to support the new type hinting annotations.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/143257
Approved by: https://github.com/justinchuby, https://github.com/albanD
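For context, a minimal sketch of the kind of rewrite these ruff/pyupgrade rules apply once Python 3.9 is the minimum supported version (the `summarize` helper is purely illustrative, not code from this PR): the `Dict` and `Tuple` aliases from `typing` give way to the PEP 585 built-in generics, so only `Any` still needs importing.

```python
from typing import Any  # Dict and Tuple imports become unnecessary


# Pre-3.9 style (the pattern being removed):
#     def summarize(state: Dict[str, Any]) -> Tuple[int, List[str]]:
#         ...

# 3.9+ style, as produced by the pyupgrade rules (PEP 585 built-in generics):
def summarize(state: dict[str, Any]) -> tuple[int, list[str]]:
    """Return the entry count and the sorted keys of a mapping."""
    return len(state), sorted(state)


print(summarize({"step": 1, "lr": 0.01}))  # (2, ['lr', 'step'])
```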
```diff
@@ -4,7 +4,7 @@ import math
 import tempfile
 import unittest
 from copy import deepcopy
-from typing import Any, Dict, Tuple
+from typing import Any
 from unittest.mock import patch
 
 from optim.test_lrscheduler import TestLRScheduler  # noqa: F401
```
```diff
@@ -1769,8 +1769,8 @@ class TestOptimRenewed(TestCase):
 
     @staticmethod
     def _state_dict_post_hook(
-        optimizer: Optimizer, state_dict: Dict[str, Any]
-    ) -> Dict[str, Any]:
+        optimizer: Optimizer, state_dict: dict[str, Any]
+    ) -> dict[str, Any]:
         if "test" in state_dict["state"]:
             state_dict["state"].pop("test")
             state_dict["ran_state_dict_pre_hook"] = True
```
```diff
@@ -1821,14 +1821,14 @@ class TestOptimRenewed(TestCase):
 
     @staticmethod
     def _load_state_dict_pre_hook1(
-        optimizer: Optimizer, state_dict: Dict[str, Any]
+        optimizer: Optimizer, state_dict: dict[str, Any]
     ) -> None:
         state_dict["param_groups"][0]["lr"] = 0.002
 
     @staticmethod
     def _load_state_dict_pre_hook2(
-        optimizer: Optimizer, state_dict: Dict[str, Any]
-    ) -> Dict[str, Any]:
+        optimizer: Optimizer, state_dict: dict[str, Any]
+    ) -> dict[str, Any]:
         # The typical use case for returning a state dict is to drastically modify the state dict.
         # I will simulate by simply making a deep copy and ensuring that my_state_dict still gets used
         my_state_dict = deepcopy(state_dict)
```
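As a usage note for the hooks tested above (a sketch, not part of the diff; the `SGD` setup and the `lower_lr_pre_hook` name are illustrative): `torch.optim.Optimizer.register_load_state_dict_pre_hook` attaches such a hook, and a hook that returns a dict replaces the state dict that is actually loaded, which is the behaviour `_load_state_dict_pre_hook2` exercises.

```python
from copy import deepcopy

import torch
from torch.optim import SGD

params = [torch.nn.Parameter(torch.zeros(2))]
opt = SGD(params, lr=0.1)


def lower_lr_pre_hook(optimizer, state_dict: dict) -> dict:
    # Returning a new dict makes the optimizer load it instead of the original.
    new_sd = deepcopy(state_dict)
    new_sd["param_groups"][0]["lr"] = 0.002
    return new_sd


opt.register_load_state_dict_pre_hook(lower_lr_pre_hook)
opt.load_state_dict(opt.state_dict())
print(opt.param_groups[0]["lr"])  # 0.002
```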
```diff
@@ -1906,7 +1906,7 @@ class TestOptimRenewed(TestCase):
 
     @optims(optim_db, dtypes=[torch.float32])
     def test_step_post_hook(self, device, dtype, optim_info):
-        def post_hook(opt: Optimizer, args: Tuple[Any], kwargs: Dict[Any, Any]):
+        def post_hook(opt: Optimizer, args: tuple[Any], kwargs: dict[Any, Any]):
             nonlocal data
             data += 2
 
```
```diff
@@ -1938,7 +1938,7 @@ class TestOptimRenewed(TestCase):
 
     @optims(optim_db, dtypes=[torch.float32])
     def test_step_pre_hook(self, device, dtype, optim_info):
-        def pre_hook(opt: Optimizer, args: Tuple[Any], kwargs: Dict[Any, Any]):
+        def pre_hook(opt: Optimizer, args: tuple[Any], kwargs: dict[Any, Any]):
             nonlocal data
             data += 2
 
```
```diff
@@ -1970,19 +1970,19 @@ class TestOptimRenewed(TestCase):
 
     @optims(optim_db, dtypes=[torch.float32])
     def test_step_all_hooks(self, device, dtype, optim_info):
-        def global_pre_hook(opt: Optimizer, args: Tuple[Any], kwargs: Dict[Any, Any]):
+        def global_pre_hook(opt: Optimizer, args: tuple[Any], kwargs: dict[Any, Any]):
             nonlocal data
             data.append(0)
 
-        def global_post_hook(opt: Optimizer, args: Tuple[Any], kwargs: Dict[Any, Any]):
+        def global_post_hook(opt: Optimizer, args: tuple[Any], kwargs: dict[Any, Any]):
             nonlocal data
             data.append(5)
 
-        def local_pre_hook(opt: Optimizer, args: Tuple[Any], kwargs: Dict[Any, Any]):
+        def local_pre_hook(opt: Optimizer, args: tuple[Any], kwargs: dict[Any, Any]):
             nonlocal data
             data.append(1)
 
-        def local_post_hook(opt: Optimizer, args: Tuple[Any], kwargs: Dict[Any, Any]):
+        def local_post_hook(opt: Optimizer, args: tuple[Any], kwargs: dict[Any, Any]):
             nonlocal data
             data.append(2)
 
```
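Finally, a sketch of how the step hooks exercised by `test_step_all_hooks` can be wired up outside the test harness (illustrative only; the `SGD` instance, the `order` list, and the lambdas are assumptions, not code from the test). Per-instance hooks go through `Optimizer.register_step_pre_hook`/`register_step_post_hook`, while the module-level `register_optimizer_step_pre_hook`/`register_optimizer_step_post_hook` functions in `torch.optim.optimizer` install global hooks that fire for every optimizer; in current PyTorch the global pre-hooks run before the per-instance ones and the global post-hooks run last.

```python
import torch
from torch.optim import SGD
from torch.optim.optimizer import (
    register_optimizer_step_post_hook,
    register_optimizer_step_pre_hook,
)

order: list[int] = []

# Global hooks fire for every optimizer instance created in the process.
h_pre = register_optimizer_step_pre_hook(lambda optimizer, args, kwargs: order.append(0))
h_post = register_optimizer_step_post_hook(lambda optimizer, args, kwargs: order.append(3))

params = [torch.nn.Parameter(torch.ones(2))]
opt = SGD(params, lr=0.1)
# Per-instance hooks only fire for `opt`.
opt.register_step_pre_hook(lambda optimizer, args, kwargs: order.append(1))
opt.register_step_post_hook(lambda optimizer, args, kwargs: order.append(2))

params[0].grad = torch.zeros(2)
opt.step()
print(order)  # expected [0, 1, 2, 3]: global pre, local pre, local post, global post

# Remove the global hooks so they do not leak into unrelated code.
h_pre.remove()
h_post.remove()
```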