From b020971e7806bba39aecf636e59e743911831ad8 Mon Sep 17 00:00:00 2001
From: Xuehai Pan
Date: Wed, 18 Jun 2025 00:11:01 +0800
Subject: [PATCH] [BE] fix typos in torchgen/ (#156083)

Pull Request resolved: https://github.com/pytorch/pytorch/pull/156083
Approved by: https://github.com/jingsh
ghstack dependencies: #156079, #156082
---
 .lintrunner.toml                                             | 1 -
 torchgen/_autoheuristic/README.md                            | 2 +-
 torchgen/_autoheuristic/benchmark_utils.py                   | 2 +-
 torchgen/_autoheuristic/collect_data.sh                      | 2 +-
 .../_autoheuristic/mixed_mm/generate_heuristic_mixedmm.sh    | 2 +-
 torchgen/_autoheuristic/pad_mm/generate_heuristic_pad_mm.sh  | 2 +-
 torchgen/api/autograd.py                                     | 6 +++---
 torchgen/api/cpp.py                                          | 2 +-
 torchgen/api/types/signatures.py                             | 2 +-
 torchgen/dest/native_functions.py                            | 2 +-
 torchgen/dest/register_dispatch_key.py                       | 4 ++--
 torchgen/dest/ufunc.py                                       | 2 +-
 torchgen/gen.py                                              | 4 ++--
 torchgen/gen_functionalization_type.py                       | 4 ++--
 torchgen/gen_schema_utils.py                                 | 2 +-
 torchgen/model.py                                            | 2 +-
 torchgen/shape_functions/gen_jit_shape_functions.py          | 2 +-
 17 files changed, 21 insertions(+), 22 deletions(-)

diff --git a/.lintrunner.toml b/.lintrunner.toml
index 6712709230f5..6382e5c740e5 100644
--- a/.lintrunner.toml
+++ b/.lintrunner.toml
@@ -1165,7 +1165,6 @@ exclude_patterns = [
     'scripts/**',
     'test/**',
     'torch/**',
-    'torchgen/**',
 ]
 init_command = [
     'python3',
diff --git a/torchgen/_autoheuristic/README.md b/torchgen/_autoheuristic/README.md
index 58613e54fb87..2241785c2983 100644
--- a/torchgen/_autoheuristic/README.md
+++ b/torchgen/_autoheuristic/README.md
@@ -89,7 +89,7 @@ context = AHContext()
 context.add_feature("m", mat1.shape[0])
 context.add_feature("k", mat1.shape[1])
 
-# adding a categorical feture
+# adding a categorical feature
 context.add_feature("mat1_dtype", mat1.dtype, is_categorical=True)
 ```
 
diff --git a/torchgen/_autoheuristic/benchmark_utils.py b/torchgen/_autoheuristic/benchmark_utils.py
index f0161065a3a0..ad75c6715dd7 100644
--- a/torchgen/_autoheuristic/benchmark_utils.py
+++ b/torchgen/_autoheuristic/benchmark_utils.py
@@ -18,7 +18,7 @@ def transpose_tensors(p_transpose_both: float = 0.05) -> tuple[bool, bool]:
 
 def fits_in_memory(dtype: Any, m: int, k: int, n: int) -> Any:
     threshold_memory = torch.cuda.get_device_properties(0).total_memory / 4
-    # dividing by 4 beause we otherwise sometimes run out of memory, I assume because
+    # dividing by 4 because we otherwise sometimes run out of memory, I assume because
     # inductor creates copies of tensors for benchmarking?
     return dtype.itemsize * (m * k + k * n + m * n) < threshold_memory
 
diff --git a/torchgen/_autoheuristic/collect_data.sh b/torchgen/_autoheuristic/collect_data.sh
index 442f6120327f..73b6364829b9 100644
--- a/torchgen/_autoheuristic/collect_data.sh
+++ b/torchgen/_autoheuristic/collect_data.sh
@@ -1,6 +1,6 @@
 #!/bin/bash
 
-# this script makes it easy parallize collecting data across using multiple GPUs
+# This script makes it easy to parallelize data collection across multiple GPUs
 
 # Check if tmux is installed
 if ! command -v tmux &> /dev/null; then
diff --git a/torchgen/_autoheuristic/mixed_mm/generate_heuristic_mixedmm.sh b/torchgen/_autoheuristic/mixed_mm/generate_heuristic_mixedmm.sh
index dd6ac78e9dfb..27a671511bea 100644
--- a/torchgen/_autoheuristic/mixed_mm/generate_heuristic_mixedmm.sh
+++ b/torchgen/_autoheuristic/mixed_mm/generate_heuristic_mixedmm.sh
@@ -12,7 +12,7 @@ MODE=$1
 # !!! SPECIFY THE GPUs THAT YOU WANT TO USE HERE !!!
 GPU_DEVICE_IDS="4,5"
 
-# !!! SPECIFY THE CONDA ENVIRONEMNT THAT YOU WANT TO BE ACTIVATED HERE !!!
+# !!! SPECIFY THE CONDA ENVIRONMENT THAT YOU WANT TO BE ACTIVATED HERE !!!
 CONDA_ENV=heuristic-pr
 
 NUM_SAMPLES=2000
diff --git a/torchgen/_autoheuristic/pad_mm/generate_heuristic_pad_mm.sh b/torchgen/_autoheuristic/pad_mm/generate_heuristic_pad_mm.sh
index d7cb6b99164c..b7dac53179be 100644
--- a/torchgen/_autoheuristic/pad_mm/generate_heuristic_pad_mm.sh
+++ b/torchgen/_autoheuristic/pad_mm/generate_heuristic_pad_mm.sh
@@ -12,7 +12,7 @@ MODE=$1
 # !!! SPECIFY THE GPUs THAT YOU WANT TO USE HERE !!!
 GPU_DEVICE_IDS="4,5"
 
-# !!! SPECIFY THE CONDA ENVIRONEMNT THAT YOU WANT TO BE ACTIVATED HERE !!!
+# !!! SPECIFY THE CONDA ENVIRONMENT THAT YOU WANT TO BE ACTIVATED HERE !!!
 CONDA_ENV=heuristic-pr
 
 NUM_SAMPLES=2000
diff --git a/torchgen/api/autograd.py b/torchgen/api/autograd.py
index 3f3b793825c9..96e192d3a48a 100644
--- a/torchgen/api/autograd.py
+++ b/torchgen/api/autograd.py
@@ -93,7 +93,7 @@ class ForwardDerivative:
     # This is only used by inplace operations
     required_original_self_value: bool
 
-    # If this formula is specified in derivatives.yaml or if we are re-using the
+    # If this formula is specified in derivatives.yaml or if we are reusing the
     # out of place formula for inplace
     is_reusing_outplace_formula: bool
 
@@ -632,7 +632,7 @@ def match_differentiability_info(
             info_dict = non_functional_info_by_signature[f_sig]
             # See https://github.com/pytorch/pytorch/pull/76320/files#r874816389
             assert not any(
-                any("self" in str(inpt.nctype.name) for inpt in info.all_saved_inputs)
+                any("self" in str(input.nctype.name) for input in info.all_saved_inputs)
                 for info in info_dict.values()
             ), f"""\
 Attempted to convert a derivative formula for a mutable operator
@@ -699,7 +699,7 @@ Attempted to convert a derivative formula for a mutable operator
             # we make sure that the original value of the input that is being modified inplace (self_p) is
             # not used in the formula. Note that the formula can use "original_self_p" here and that would
             # trigger a clone of the original input.
-            # - If we are re-using the out of place formula (is_exact_match == False) then we replace every
+            # - If we are reusing the out of place formula (is_exact_match == False) then we replace every
             #   occurrence of self_p and self_t by original_self_p and original_self_t. These will be
             #   populated by cloned version of the original input (either the clone done by the backward AD
             #   logic if self is also used in a backward formula or a special clone that we add).
diff --git a/torchgen/api/cpp.py b/torchgen/api/cpp.py
index c619ec45d2f8..862cef30dba4 100644
--- a/torchgen/api/cpp.py
+++ b/torchgen/api/cpp.py
@@ -127,7 +127,7 @@ def valuetype_type(
 
 
 # Translation of types occurring in JIT arguments to a C++ argument type.
-# If remove_non_owning_ref_types is set, we'll guarantee that the outputed CType is not a non-owning reference type.
+# If remove_non_owning_ref_types is set, we'll guarantee that the output CType is not a non-owning reference type.
 # For example, we'll return std::vector instead of IntArrayRef.
 # See Note [translation from C++ reference to value types]
 def argumenttype_type(
diff --git a/torchgen/api/types/signatures.py b/torchgen/api/types/signatures.py
index 384eeeb8e483..b3856e65e700 100644
--- a/torchgen/api/types/signatures.py
+++ b/torchgen/api/types/signatures.py
@@ -49,7 +49,7 @@ class CppSignature:
 
     # Is this a fallback C++ binding?  Fallback bindings are enabled by
     # manual_cpp_binding: True and are alternate, non-public API that
-    # lets manual C++ binding implementors access the binding that would
+    # lets manual C++ binding implementers access the binding that would
     # have been automatically generated
     fallback_binding: bool = False
 
diff --git a/torchgen/dest/native_functions.py b/torchgen/dest/native_functions.py
index b1488b4f1887..05e252d09f9c 100644
--- a/torchgen/dest/native_functions.py
+++ b/torchgen/dest/native_functions.py
@@ -12,7 +12,7 @@ def torch_api_key_word_prefix(bankend_index: BackendIndex) -> str:
     if bankend_index.external:
         return ""
 
-    # Although Intel GPU ATen library is out-of-tree, it still utilizes torchgen to produce structrued
+    # Although Intel GPU ATen library is out-of-tree, it still utilizes torchgen to produce structured
     # kernels. Regarding these produced structured kernels, they should be visible for the Intel GPU ATen
     # library. Therefore, we need to add "TORCH_XPU_API" prefix to these structured kernels,
     # rather than "TORCH_API". Because the semantic of "TORCH_API" is "hidden" for out-of-tree backends.
diff --git a/torchgen/dest/register_dispatch_key.py b/torchgen/dest/register_dispatch_key.py
index ffe90bcaba85..52bb9602a73f 100644
--- a/torchgen/dest/register_dispatch_key.py
+++ b/torchgen/dest/register_dispatch_key.py
@@ -764,7 +764,7 @@ resize_out(out, sizes, strides, options);
         # we generate CompositeExplicitAutogradNonFunctional implementations of functional and inplace
         # based on the out implementation. But in fact, out is definable by
         # functional too (just not very efficiently), and this is honestly the
-        # MORE likely situation for a backend implementor. How do we pick?
+        # MORE likely situation for a backend implementer. How do we pick?
         # Well, taking a page from Haskell type classes and default methods,
         # we could conceivably register a circular definition (out in terms
         # of functional, and functional in terms of out) and just require
@@ -777,7 +777,7 @@ resize_out(out, sizes, strides, options);
             and f.func.kind() is SchemaKind.out
         ):
             # Never generate a default implementation for out, that's what you
-            # have to define as a backend implementor
+            # have to define as a backend implementer
             return None
 
         # Note [Direct dispatch bindings]
diff --git a/torchgen/dest/ufunc.py b/torchgen/dest/ufunc.py
index 832316d018e8..045d8de110e7 100644
--- a/torchgen/dest/ufunc.py
+++ b/torchgen/dest/ufunc.py
@@ -42,7 +42,7 @@ if TYPE_CHECKING:
 # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 #
 # NB: not bothering to generate dispatch stub forward declaration in header,
-# we can just paste it whereever necessary
+# we can just paste it wherever necessary
 # TODO: use BackendIndex
 # dispatch_key: DispatchKey  # only CPU/CUDA right now
 
diff --git a/torchgen/gen.py b/torchgen/gen.py
index 42da383b586d..80bc6e58de86 100644
--- a/torchgen/gen.py
+++ b/torchgen/gen.py
@@ -506,7 +506,7 @@ def static_dispatch(
 ) -> str:
     """
     For a given `NativeFunction`, find out the corresponding backend and dispatch to it. If more than one
-    backends exsit, fallback to static dispatch by determining dispatch key from inputs.
+    backends exist, fallback to static dispatch by determining dispatch key from inputs.
     Arguments:
         sig: A CppSignature or DispatcherSignature for this native function we want to use.
         f: NativeFunction to generate static dispatch.
@@ -2611,7 +2611,7 @@ def gen_source_files(
             # but they could theoretically be called from user code (I added these kernels for completeness,
             # since the ops are part of the public API).
             # (2) A derivative formula for every {view}_copy operator
-            # {view}_copy operators can re-use the same derivative formulas as their {view} op counterparts,
+            # {view}_copy operators can reuse the same derivative formulas as their {view} op counterparts,
             # so rather than stamping all of the entries out in derivatives.yaml,
             # we codegen them in.
             # This is similar to how autograd codegen doesn't require inplace ops to have a derivatives.yaml entry.
diff --git a/torchgen/gen_functionalization_type.py b/torchgen/gen_functionalization_type.py
index bf4b884d849f..42407974087a 100644
--- a/torchgen/gen_functionalization_type.py
+++ b/torchgen/gen_functionalization_type.py
@@ -198,7 +198,7 @@ def is_tensor_like(a: Argument | TensorOptionsArguments | SelfArgument) -> bool:
 # We need to wrap / unwrap various arguments from the op in the functionalization kernels.
 # Some op schemas include non-owning types though (like TensorList),
 # and when we unwrap them we expect to get out an owning type!.
-# We also return a lambda that tells you how to conver the non-owning type argument into the owning type.
+# We also return a lambda that tells you how to convert the non-owning type argument into the owning type.
 def get_owning_type(t: CType) -> tuple[CType, Callable[[str], str]]:
     if t == BaseCType(tensorListT):
         return VectorCType(BaseCType(tensorT)), lambda x: f"{x}.vec()"
@@ -441,7 +441,7 @@ def emit_view_functionalization_body(
       // This function adds the above view meta to the current tensor and replays them off the base,
      // mutating the size/stride info of the current FunctionalTensorWrapper.
       // Because of this, we need to make sure to run the reference shape function above,
-      // BEFORE doing this (otherwise we'll end up runnin the reference function using the wrong sizes/strides)
+      // BEFORE doing this (otherwise we'll end up running the reference function using the wrong sizes/strides)
       at::functionalization::impl::mutate_view_meta({view_tensor_name}, view_meta);
       // See Note [Propagating strides in the functionalization pass]
       // XLA/LTC don't implement the logic to propagate strides correctly, so we need to rely
diff --git a/torchgen/gen_schema_utils.py b/torchgen/gen_schema_utils.py
index 1095d2e7e431..b81c91527baa 100644
--- a/torchgen/gen_schema_utils.py
+++ b/torchgen/gen_schema_utils.py
@@ -47,7 +47,7 @@ class TypeGen:
             all_base_tys = [TypeGen.from_example(x) for x in obj]
             if len(set(all_base_tys)) > 1:
                 raise RuntimeError(
-                    f"Cannot generate schema for a seqeunce of args of heterogeneous types: {all_base_tys}. "
+                    f"Cannot generate schema for a sequence of args of heterogeneous types: {all_base_tys}. "
                     "Consider unpacking the argument and give proper names to them if possible "
                     "instead of using *args."
                 )
diff --git a/torchgen/model.py b/torchgen/model.py
index 89a56d98e74e..826b37863f00 100644
--- a/torchgen/model.py
+++ b/torchgen/model.py
@@ -593,7 +593,7 @@ class NativeFunction:
     has_composite_explicit_autograd_non_functional_kernel: bool
 
     # Tags are used to describe semantic information about (groups of) operators,
-    # That aren't easily inferrable directly from the operator's schema.
+    # That aren't easily inferable directly from the operator's schema.
     tags: set[str]
 
     # NB: The benefit of defining a dataclass is that we automatically get
diff --git a/torchgen/shape_functions/gen_jit_shape_functions.py b/torchgen/shape_functions/gen_jit_shape_functions.py
index 56a3d8bf0dd3..6238f9741f87 100644
--- a/torchgen/shape_functions/gen_jit_shape_functions.py
+++ b/torchgen/shape_functions/gen_jit_shape_functions.py
@@ -102,7 +102,7 @@ def gen_serialized_decompisitions() -> str:
         output_strs.append(curr_str)
 
     final_output = ""
-    # Windows compiler doesnt correctly handle adjacent
+    # Windows compiler doesn't correctly handle adjacent
     # string literals
     for output_str in output_strs:
         start = '+ std::string(R"=====('
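
Note for reviewers: the benchmark_utils.py hunk above is the one place in this
patch where the fixed comment sits next to a live formula, so a worked example
may help when sanity-checking it. The sketch below mirrors fits_in_memory under
two stated assumptions that are NOT part of the patch: the
torch.cuda.get_device_properties(0).total_memory query is swapped for a
hypothetical fixed 16 GiB budget so the snippet runs without a GPU, and the
dtype argument is narrowed to a plain itemsize integer.

    def fits_in_memory(itemsize: int, m: int, k: int, n: int,
                       total_memory: int = 16 * 1024**3) -> bool:
        # A matmul benchmark materializes mat1 (m x k), mat2 (k x n), and the
        # output (m x n). Dividing the budget by 4 leaves headroom for the
        # extra tensor copies inductor appears to make while benchmarking.
        threshold_memory = total_memory / 4
        return itemsize * (m * k + k * n + m * n) < threshold_memory

    # float16 (itemsize 2) with m = k = n = 8192:
    # 2 bytes * 3 * 8192**2 elements is roughly 384 MiB, comfortably under
    # the 4 GiB threshold implied by a hypothetical 16 GiB card.
    print(fits_in_memory(2, 8192, 8192, 8192))  # True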