diff --git a/.lintrunner.toml b/.lintrunner.toml
index 0ed2cd453b6f..5a661d383542 100644
--- a/.lintrunner.toml
+++ b/.lintrunner.toml
@@ -38,7 +38,7 @@ init_command = [
     '--dry-run={{DRYRUN}}',
     'flake8==6.1.0',
     'flake8-bugbear==23.3.23',
-    'flake8-comprehensions==3.12.0',
+    'flake8-comprehensions==3.15.0',
     'flake8-executable==2.1.3',
     'flake8-logging-format==0.9.0',
     'flake8-pyi==23.3.1',
diff --git a/test/inductor/test_max_autotune.py b/test/inductor/test_max_autotune.py
index 7979cb552d63..2c4f635ddbc4 100644
--- a/test/inductor/test_max_autotune.py
+++ b/test/inductor/test_max_autotune.py
@@ -322,7 +322,7 @@ class TestMaxAutotune(TestCase):
                 return None
 
         fake_choices = [FakeChoiceCaller() for i in range(10)]
-        fake_lookup_result = {choice: 0.123 for choice in fake_choices}
+        fake_lookup_result = dict.fromkeys(fake_choices, 0.123)
 
         def no_lookup(
             choices: List[ChoiceCaller],
diff --git a/test/jit/test_list_dict.py b/test/jit/test_list_dict.py
index 637bd57853cb..47173d56653e 100644
--- a/test/jit/test_list_dict.py
+++ b/test/jit/test_list_dict.py
@@ -1645,9 +1645,9 @@ class TestDict(JitTestCase):
 
         def test_dictcomprehension_is_typed_from_annotation():
             metasyntactics = ["foo", "bar", "baz"]
-            x: Dict[str, Optional[int]] = {
+            x: Dict[str, Optional[int]] = {  # noqa: C420, RUF025
                 word: None for word in metasyntactics
-            }  # noqa: RUF025
+            }
             return x
 
         self.checkScript(test_dictcomprehension_is_typed_from_annotation, ())
diff --git a/torch/_dynamo/variables/nn_module.py b/torch/_dynamo/variables/nn_module.py
index 909bde78a5ea..f92458337672 100644
--- a/torch/_dynamo/variables/nn_module.py
+++ b/torch/_dynamo/variables/nn_module.py
@@ -921,7 +921,7 @@ class UnspecializedNNModuleVariable(UserDefinedObjectVariable):
            params_list = collect_parameters(self, recurse=recurse)
 
            # Account for duplicated params
-           deduplicated_params = list({param: None for param in params_list}.keys())
+           deduplicated_params = list(dict.fromkeys(params_list).keys())
 
            return variables.ListIteratorVariable(
                deduplicated_params, mutable_local=MutableLocal()
diff --git a/torch/_inductor/dependencies.py b/torch/_inductor/dependencies.py
index 5d5f54a035a9..2bcda6dcaef9 100644
--- a/torch/_inductor/dependencies.py
+++ b/torch/_inductor/dependencies.py
@@ -69,7 +69,7 @@ class MemoryDep(Dep):
        """
        Return the offset by setting every variable to be 0.
        """
-       return sympy_subs(self.index, {v: 0 for v in self.var_names})
+       return sympy_subs(self.index, dict.fromkeys(self.var_names, 0))
 
    def normalize_with_stride_order(self, prefix="t"):
        r"""
diff --git a/torch/distributed/_tools/mem_tracker.py b/torch/distributed/_tools/mem_tracker.py
index e2439e22e115..c793f3e9b264 100644
--- a/torch/distributed/_tools/mem_tracker.py
+++ b/torch/distributed/_tools/mem_tracker.py
@@ -439,7 +439,7 @@ class MemTracker(TorchDispatchMode):
            maybe_zero = False
        # Ensure the device entry exists in the current memory snapshot, initializing if necessary.
        dev_snap = self._curr_mem_snap.setdefault(
-           winfo.device, {reftype: 0 for reftype in self._ref_class}
+           winfo.device, dict.fromkeys(self._ref_class, 0)
        )
        dev_snap.setdefault(_TOTAL_KEY, 0)
        # Handle different types of updates based on the update type (`u_type`).
diff --git a/torch/fx/passes/infra/partitioner.py b/torch/fx/passes/infra/partitioner.py
index e308ab81f0bd..271f90a7b75e 100644
--- a/torch/fx/passes/infra/partitioner.py
+++ b/torch/fx/passes/infra/partitioner.py
@@ -18,7 +18,7 @@ logger.setLevel(logging.WARNING)
 class Partition:
    def __init__(self, id: Optional[int] = None, nodes: Optional[Iterable[Node]] = None):
        self.id = id
-       self.nodes = {node: None for node in nodes} if nodes is not None else {}
+       self.nodes = dict.fromkeys(nodes) if nodes is not None else {}
 
    def __repr__(self) -> str:
        return str(self.nodes)
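Note (illustration, not part of the diff): every hunk above applies the same rewrite, which flake8-comprehensions flags as C420 (and ruff as RUF025) once the pin is bumped to 3.15.0: a dict comprehension that maps every key to the same constant value can be replaced with dict.fromkeys(). A minimal sketch of the equivalence and its one caveat, using the hypothetical names metasyntactics/old_style/new_style for illustration:

    # The rewrite this diff applies: a comprehension with a constant value...
    metasyntactics = ["foo", "bar", "baz"]
    old_style = {word: None for word in metasyntactics}

    # ...is equivalent to dict.fromkeys, which avoids the per-iteration loop.
    new_style = dict.fromkeys(metasyntactics)  # value defaults to None
    assert old_style == new_style

    counts = dict.fromkeys(metasyntactics, 0)  # explicit constant value
    assert counts == {"foo": 0, "bar": 0, "baz": 0}

    # Caveat: dict.fromkeys evaluates the value once and shares that single
    # object across all keys, so a mutable value would be aliased:
    shared = dict.fromkeys(metasyntactics, [])
    shared["foo"].append(1)
    assert shared["bar"] == [1]  # same list object under every key

Every value used in this diff (None, 0, 0.123) is immutable, so the rewrite is behavior-preserving. The one comprehension kept (with a noqa) in test_list_dict.py appears deliberate: judging by its name, that test exercises how a dict comprehension picks up its type from the annotation under TorchScript, so the comprehension itself is the thing under test.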