Mirror of https://github.com/pytorch/pytorch.git (synced 2025-10-20 12:54:11 +08:00)
[2/N] Apply py39 ruff fixes (#141938)
Fixes #ISSUE_NUMBER

Pull Request resolved: https://github.com/pytorch/pytorch/pull/141938
Approved by: https://github.com/ezyang
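For context: the py39 fixes in this series swap typing.List / typing.Tuple / typing.Dict
annotations for the builtin generics that PEP 585 made subscriptable on Python 3.9+, and
prune the typing imports that become unused. The sketch below is illustrative only (the
summarize function is made up, not taken from this diff); it shows the before/after shape
of the rewrite, which ruff can apply automatically via its pyupgrade-derived rules
(e.g. UP006/UP035 with ruff check --fix).

    # Hypothetical example of the PEP 585 cleanup applied throughout this PR.
    #
    # Before (typing aliases were required on Python < 3.9):
    #     from typing import Dict, List, Optional, Tuple
    #     def summarize(lines: List[str]) -> Dict[str, Tuple[int, Optional[str]]]: ...
    #
    # After: builtin list/dict/tuple are subscriptable at runtime on 3.9+, so only
    # names without a builtin equivalent (here Optional) stay imported from typing.
    from typing import Optional

    def summarize(lines: list[str]) -> dict[str, tuple[int, Optional[str]]]:
        out: dict[str, tuple[int, Optional[str]]] = {}
        for line in lines:
            stripped = line.strip()
            first = stripped.split()[0] if stripped else None  # None for blank lines
            out[line] = (len(line), first)
        return out

Note that the hunks below keep Optional and Union imports: the X | Y spelling is PEP 604
and needs Python 3.10 at runtime, so a 3.9-targeted cleanup only replaces the container
types.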
@@ -1063,13 +1063,6 @@ def gen_pyi(
         # NB: Keep this in sync with enum in aten/src/ATen/core/Reduction.h
         hint = hint.replace("at::Reduction::Mean", "1")
         hint = hint.replace(": Tensor = None", ": Optional[Tensor] = None")
-        # Match both:
-        # ": Union[Tensor, Tuple[Tensor, ...], List[Tensor]] = None"
-        # ": Union[Tuple[Tensor, ...], List[Tensor]] = None"
-        hint = hint.replace(
-            "Tuple[Tensor, ...], List[Tensor]] = None",
-            "Tuple[Tensor, ...], List[Tensor], None] = None",
-        )
         return hint

     docstrs = gather_docstrs()
@@ -1,4 +1,4 @@
-from typing import Any, Dict, List, Optional, Tuple
+from typing import Any, Optional

 import numpy as np
 from sklearn.tree import _tree  # type: ignore[import-untyped]
@@ -34,10 +34,10 @@ class DecisionTree:
     does not seem to be easy with sklearn.
     """

-    def __init__(self, sklearn_tree: Any, feature_names: List[str]) -> None:
+    def __init__(self, sklearn_tree: Any, feature_names: list[str]) -> None:
         self.feature_names = feature_names
         self.root = self._convert_sklearn_tree(sklearn_tree.tree_)
-        self.classes_: List[str] = sklearn_tree.classes_
+        self.classes_: list[str] = sklearn_tree.classes_

     def _convert_sklearn_tree(
         self, sklearn_tree: Any, node_id: int = 0
@@ -193,9 +193,9 @@ class DecisionTree:

     def codegen(
         self,
-        dummy_col_2_col_val: Dict[str, Tuple[str, Any]],
-        lines: List[str],
-        unsafe_leaves: List[int],
+        dummy_col_2_col_val: dict[str, tuple[str, Any]],
+        lines: list[str],
+        unsafe_leaves: list[int],
     ) -> None:
         # generates python code for the decision tree
         def codegen_node(node: DecisionTreeNode, depth: int) -> None:
@@ -223,7 +223,7 @@ class DecisionTree:
                 codegen_node(node.right, depth + 1)

         def handle_leaf(
-            node: DecisionTreeNode, indent: str, unsafe_leaves: List[int]
+            node: DecisionTreeNode, indent: str, unsafe_leaves: list[int]
         ) -> str:
             """
             This generates the code for a leaf node in the decision tree. If the leaf is unsafe, the learned heuristic
@@ -2,7 +2,7 @@ import argparse
 import random
 import time
 from abc import abstractmethod
-from typing import Any, Tuple
+from typing import Any

 from tqdm import tqdm  # type: ignore[import-untyped]

@@ -71,7 +71,7 @@ class BenchmarkRunner:
     def run_benchmark(self, *args: Any) -> None: ...

     @abstractmethod
-    def create_input(self) -> Tuple[Any, ...]: ...
+    def create_input(self) -> tuple[Any, ...]: ...

     def main(self, num_samples: int, num_reps: int) -> None:
         for _ in tqdm(range(num_samples)):
@@ -1,10 +1,10 @@
 import random
-from typing import Any, Tuple
+from typing import Any

 import torch


-def transpose_tensors(p_transpose_both: float = 0.05) -> Tuple[bool, bool]:
+def transpose_tensors(p_transpose_both: float = 0.05) -> tuple[bool, bool]:
     transpose_both = random.choices(
         [True, False], [p_transpose_both, 1 - p_transpose_both]
     )[0]
@@ -31,7 +31,7 @@ def get_mm_tensors(
     transpose_right: bool,
     dtype_left: Any,
     dtype_right: Any,
-) -> Tuple[Any, Any]:
+) -> tuple[Any, Any]:
     if transpose_left:
         a = torch.randn(k, m, dtype=dtype_left).t()
     else:
@@ -1,14 +1,13 @@
 import sys
-from typing import List


-def merge_txt_files(file_list: List[str], output_file: str) -> None:
+def merge_txt_files(file_list: list[str], output_file: str) -> None:
     if not file_list:
         print("No input files provided.")
         return

-    metadata: List[str] = []
-    content: List[str] = []
+    metadata: list[str] = []
+    content: list[str] = []

     # Read metadata and content from all files
     for file_path in file_list:
@@ -6,7 +6,7 @@ import sys

 sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

-from typing import Any, Tuple
+from typing import Any

 from benchmark_runner import BenchmarkRunner  # type: ignore[import-not-found]
 from benchmark_utils import (  # type: ignore[import-not-found]
@@ -33,7 +33,7 @@ class BenchmarkRunnerMixedMM(BenchmarkRunner):  # type: ignore[misc, no-any-unim
     def __init__(self) -> None:
         super().__init__("mixed_mm")

-    def create_input(self) -> Tuple[Any, ...]:
+    def create_input(self) -> tuple[Any, ...]:
         dtype1, dtype2 = self.get_dtypes()
         m, k, n = self.get_m_k_n(dtype1)
         transpose_left, transpose_right = False, True
@@ -109,7 +109,7 @@ class BenchmarkRunnerMixedMM(BenchmarkRunner):  # type: ignore[misc, no-any-unim
         else:
             return get_random_between_pow2(1, 7)

-    def get_m_k_n(self, dtype: Any) -> Tuple[int, int, int]:
+    def get_m_k_n(self, dtype: Any) -> tuple[int, int, int]:
         numel_max = 2**31

         # repeat until tensors fit in memory
@@ -24,7 +24,7 @@ class TestMixedMM(TestCase):
 # fmt: off
 # This file was generated by AutoHeuristic. Do not modify it manually!
 # To regenerate this file, take a look at the steps in the README.md file inside torchgen/_autoheuristic/mixed_mm/
-from typing import List, Optional, Tuple
+from typing import Optional

 from torch._inductor.autoheuristic.autoheuristic_utils import (
     AHContext,
@@ -39,7 +39,7 @@ from torch._inductor.autoheuristic.learnedheuristic_interface import (
 class MixedMMA100(LearnedHeuristicDecision):

     def __init__(self) -> None:
-        self.choices: List[Choice] = []
+        self.choices: list[Choice] = []
         self.fill_choices()

     def check_precondition(self, metadata: AHMetadata, context: AHContext,) -> bool:
@@ -84,7 +84,7 @@ class MixedMMA100(LearnedHeuristicDecision):
     def get_name(self) -> str:
         return 'mixed_mm'

-    def get_best_choices(self, context: AHContext) -> Optional[List[Tuple[float, int]]]:
+    def get_best_choices(self, context: AHContext) -> Optional[list[tuple[float, int]]]:
         if str(context.get_value('1LEQmLEQ16')) != 'True':
             if context.get_value('m') <= 32.5:
                 if context.get_value('n') <= 6976.0:
@@ -186,7 +186,7 @@ class MixedMMA100(LearnedHeuristicDecision):
 # fmt: off
 # This file was generated by AutoHeuristic. Do not modify it manually!
 # To regenerate this file, take a look at the steps in the README.md file inside torchgen/_autoheuristic/mixed_mm/
-from typing import List, Optional, Tuple
+from typing import Optional

 from torch._inductor.autoheuristic.autoheuristic_utils import (
     AHContext,
@@ -201,7 +201,7 @@ from torch._inductor.autoheuristic.learnedheuristic_interface import (
 class MixedMMH100(LearnedHeuristicDecision):

     def __init__(self) -> None:
-        self.choices: List[Choice] = []
+        self.choices: list[Choice] = []
         self.fill_choices()

     def check_precondition(self, metadata: AHMetadata, context: AHContext,) -> bool:
@@ -245,7 +245,7 @@ class MixedMMH100(LearnedHeuristicDecision):
     def get_name(self) -> str:
         return 'mixed_mm'

-    def get_best_choices(self, context: AHContext) -> Optional[List[Tuple[float, int]]]:
+    def get_best_choices(self, context: AHContext) -> Optional[list[tuple[float, int]]]:
         if context.get_value('arith_intensity') <= 15.988086223602295:
             if context.get_value('n') <= 25280.0:
                 if context.get_value('n') <= 1344.0:
@@ -6,7 +6,7 @@ import sys

 sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

-from typing import Any, Tuple
+from typing import Any

 from benchmark_runner import BenchmarkRunner  # type: ignore[import-not-found]
 from benchmark_utils import (  # type: ignore[import-not-found]
@@ -28,7 +28,7 @@ class BenchmarkRunnerMM(BenchmarkRunner):  # type: ignore[misc, no-any-unimporte
     def __init__(self) -> None:
         super().__init__("mm")

-    def create_input(self) -> Tuple[Any, ...]:
+    def create_input(self) -> tuple[Any, ...]:
         dtype = random.choices([torch.float32, torch.float16, torch.bfloat16])[0]
         set_precision(dtype)
         m, k, n = self.get_m_k_n(dtype)
@@ -100,7 +100,7 @@ class BenchmarkRunnerMM(BenchmarkRunner):  # type: ignore[misc, no-any-unimporte
             print(f"random_type {distr_type} not supported")
             sys.exit(1)

-    def get_m_k_n(self, dtype: Any) -> Tuple[int, int, int]:
+    def get_m_k_n(self, dtype: Any) -> tuple[int, int, int]:
         numel_max = 2**31

         # repeat until tensors fit in memory
@@ -5,7 +5,7 @@ import sys

 sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

-from typing import Any, Tuple
+from typing import Any

 from benchmark_runner import BenchmarkRunner  # type: ignore[import-not-found]
 from benchmark_utils import (  # type: ignore[import-not-found]
@@ -30,7 +30,7 @@ class BenchmarkRunnerPadMM(BenchmarkRunner):  # type: ignore[misc, no-any-unimpo
     def __init__(self) -> None:
         super().__init__("pad_mm")

-    def create_input(self) -> Tuple[Any, ...]:
+    def create_input(self) -> tuple[Any, ...]:
         dtype = self.get_dtype()
         set_precision(dtype)
         m, k, n = self.get_m_k_n(dtype)
@@ -113,7 +113,7 @@ class BenchmarkRunnerPadMM(BenchmarkRunner):  # type: ignore[misc, no-any-unimpo
     def is_aligned(self, dim: int, align_size: int) -> bool:
         return dim % align_size == 0

-    def get_m_k_n(self, dtype: Any) -> Tuple[int, int, int]:
+    def get_m_k_n(self, dtype: Any) -> tuple[int, int, int]:
         uniform = random.choices([True, False])[0]
         align_size = get_alignment_size_dtype(dtype)

@@ -531,7 +531,7 @@ class AHTrainDecisionTree(AHTrain):
         """
        Generates the definition of the predict function.
         """
-        return "def get_best_choices(self, context: AHContext) -> Optional[List[Tuple[float, int]]]:"
+        return "def get_best_choices(self, context: AHContext) -> Optional[list[tuple[float, int]]]:"

     def codegen_boilerplate(
         self, heuristic_name, opt_name, threshold, shared_memory, device_capa, classes
@@ -545,7 +545,7 @@ class AHTrainDecisionTree(AHTrain):
 # fmt: off
 # This file was generated by AutoHeuristic. Do not modify it manually!
 # To regenerate this file, take a look at the steps in the README.md file inside torchgen/_autoheuristic/{opt_name}/
-from typing import List, Optional, Tuple
+from typing import Optional

 from torch._inductor.autoheuristic.autoheuristic_utils import (
     AHContext,
@@ -560,7 +560,7 @@ from torch._inductor.autoheuristic.learnedheuristic_interface import (
 class {heuristic_name}(LearnedHeuristicDecision):

     def __init__(self) -> None:
-        self.choices: List[Choice] = []
+        self.choices: list[Choice] = []
         self.fill_choices()

     {self.gen_precondition(opt_name, shared_memory, device_capa)}
@@ -80,7 +80,7 @@ def process_ir_type(
     (3) making cpp-reference types into cpp-value types (e.g. vector instead of IntArrayRef)

     (1) converts at::Tensors to lazy::Values (which wrap lazy::Nodes, with which Lazy IR represents tensors.)
-    There is special handling for Optional[Tensor] or List[Tensor], etc- hence 'tensor-like'
+    There is special handling for Optional[Tensor] or list[Tensor], etc- hence 'tensor-like'

     This is incomplete- there are assertions in places that it's expected to need to add
     more types as the codegen is used with more operators.
@@ -956,14 +956,13 @@ def argument_type_str_pyi(t: Type) -> str:
             ret = "Union[_int, _size]" if t.size is not None else "_size"
         elif t.is_tensor_like():
             # TODO: this doesn't seem right...
-            # Tensor?[] currently translates to Optional[Union[Tuple[Tensor, ...], List[Tensor]]]
-            # It should probably translate to Union[Tuple[Optional[Tensor], ...], List[Optional[Tensor]]]
-            if isinstance(t.elem, OptionalType):
-                add_optional = True
+            # Tensor?[] currently translates to Optional[Union[tuple[Tensor, ...], list[Tensor]]]
+            # It should probably translate to Union[tuple[Optional[Tensor], ...], list[Optional[Tensor]]]
+            add_optional = True
             ret = (
-                "Union[Tensor, Tuple[Tensor, ...], List[Tensor]]"
+                "Union[Tensor, tuple[Tensor, ...], list[Tensor]]"
                 if t.size is not None
-                else "Union[Tuple[Tensor, ...], List[Tensor]]"
+                else "Union[tuple[Tensor, ...], list[Tensor]]"
             )
         elif str(t.elem) == "float":
             ret = "Sequence[_float]"
@@ -1001,7 +1000,7 @@ def return_type_str_pyi(t: Type) -> str:

     if isinstance(t, ListType):
         inner = return_type_str_pyi(t.elem)
-        return f"Tuple[{inner}, ...]"
+        return f"tuple[{inner}, ...]"

     return argument_type_str_pyi(t)

@@ -1014,7 +1013,7 @@ def returns_structseq_pyi(signature: PythonSignature) -> tuple[str, str] | None:
         # These types are structseq objects which act like named NamedTuples, but
         # the constructor acts like the constructor of tuple. Using typing.NamedTuple
         # does not allow us to override __init__.
-        seq_type = f"Tuple[{', '.join(python_returns)}]"
+        seq_type = f"tuple[{', '.join(python_returns)}]"
         structseq_def_lines = [
             f"class {structseq_name}({seq_type}):",
         ]
@@ -1038,12 +1037,12 @@ def returns_structseq_pyi(signature: PythonSignature) -> tuple[str, str] | None:
         structseq_def = "\n".join(structseq_def_lines)
         # Example:
         # structseq_def = (
-        #     "class max(Tuple[Tensor, Tensor]):\n"
+        #     "class max(tuple[Tensor, Tensor]):\n"
         #     "    @property\n"
         #     "    def values(self) -> Tensor: ...\n"
         #     "    @property\n"
         #     "    def indices(self) -> Tensor: ...\n"
-        #     "    def __new__(cls, sequence: Tuple[Tensor, Tensor]): ...\n"
+        #     "    def __new__(cls, sequence: tuple[Tensor, Tensor]): ...\n"
         #     "    n_fields: _int = 2",
         #     "    n_sequeunce_fields: _int = 2",
         #     "    n_unnamed_fields: _int = 0",
@@ -1060,7 +1059,7 @@ def returns_str_pyi(signature: PythonSignature) -> str:

     python_returns = [return_type_str_pyi(r.type) for r in signature.returns.returns]
     if len(python_returns) > 1:
-        return "Tuple[" + ", ".join(python_returns) + "]"
+        return "tuple[" + ", ".join(python_returns) + "]"
     if len(python_returns) == 1:
         return python_returns[0]
     return "None"
@@ -2,7 +2,7 @@ from __future__ import annotations

 import contextlib
 import functools
-from typing import Any, Callable, List, Optional, Tuple, TYPE_CHECKING, TypeVar, Union
+from typing import Any, Callable, Optional, TYPE_CHECKING, TypeVar, Union

 import torchgen.local as local
 from torchgen.model import (
@@ -39,7 +39,7 @@ F2 = TypeVar(
     str,
 )

-F3 = TypeVar("F3", Tuple[NativeFunction, Any], List[NativeFunction])
+F3 = TypeVar("F3", tuple[NativeFunction, Any], list[NativeFunction])


 @contextlib.contextmanager
@@ -1,5 +1,5 @@
 # Represents all kernels used by an Executorch model.
-# It maintains a Dict[OperatorName, Dict[ETKernelKey, BackendMetadata]] structure.
+# It maintains a dict[OperatorName, dict[ETKernelKey, BackendMetadata]] structure.

 from __future__ import annotations

@@ -1,4 +1,4 @@
-from typing import Any, Optional, Tuple, Union
+from typing import Any, Optional, Union

 from torchgen.model import (
     Annotation,
@@ -80,8 +80,8 @@ class FunctionSchemaGen:
     @staticmethod
     def from_example(
         op_name: str,
-        example_inputs: Tuple[Tuple[str, Any], ...],
-        example_outputs: Tuple[Any, ...],
+        example_inputs: tuple[tuple[str, Any], ...],
+        example_outputs: tuple[Any, ...],
     ) -> FunctionSchema:
         args = []
         for name, inp in example_inputs:
@@ -5,7 +5,7 @@ import itertools
 import re
 from dataclasses import dataclass
 from enum import auto, Enum
-from typing import Callable, List, TYPE_CHECKING
+from typing import Callable, TYPE_CHECKING

 from torchgen.utils import assert_never, NamespaceHelper, OrderedSet

@@ -249,7 +249,7 @@ class _TorchDispatchModeKey(Enum):


 def codegen_per_backend_entries() -> str:
-    r: List[str] = []
+    r: list[str] = []
     for fk in FUNCTIONALITY_KEYS:
         r.extend(f"    {fk}{bc} = auto()" for bc in BACKEND_COMPONENTS)
     return "\n".join(r)
@@ -1518,7 +1518,7 @@ class FunctionSchema:
                 and self.returns[0].annotation == self_a.argument.annotation
             )
         else:
-            # You can't method chain on non-tensor self arguments though (like a List[Tensor])
+            # You can't method chain on non-tensor self arguments though (like a list[Tensor])
             # so in all other cases we expect the return type to be none.
             assert len(self.returns) == 0
