From 297805fd8f59b76a28048a79e8bced2616ed8713 Mon Sep 17 00:00:00 2001
From: Sean McGovern
Date: Sat, 14 Jun 2025 03:37:33 +0000
Subject: [PATCH] Typo fixes for "overridden" in comments and function names (#155944)

This word appears often in class descriptions and is not consistently spelled.
Update comments and some function names to use the correct spelling
consistently. Facilitates searching the codebase.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/155944
Approved by: https://github.com/Skylion007
---
 aten/src/ATen/core/dispatch/OperatorEntry.cpp |  2 +-
 benchmarks/tensorexpr/benchmark.py            |  2 +-
 c10/core/DispatchKeySet.h                     |  2 +-
 test/dynamo/test_buffers_override.py          |  2 +-
 test/dynamo/test_subclasses.py                | 18 +++++++++---------
 test/dynamo/test_unspec.py                    |  2 +-
 test/inductor/test_aot_inductor_package.py    |  2 +-
 test/nn/test_lazy_modules.py                  |  4 ++--
 test/onnx/internal/test_registraion.py        |  2 +-
 test/quantization/pt2e/test_quantize_pt2e.py  |  2 +-
 test/test_cpp_extensions_aot.py               |  4 ++--
 test/test_dispatch.py                         |  2 +-
 test/test_overrides.py                        |  2 +-
 test/test_schema_check.py                     |  2 +-
 tools/dynamo/graph_break_registry.json        |  6 +++---
 torch/_dynamo/pgo.py                          |  2 +-
 torch/_dynamo/variables/misc.py               |  2 +-
 torch/_dynamo/variables/torch_function.py     | 18 +++++++++---------
 torch/_functorch/autograd_function.py         | 10 +++++-----
 torch/_higher_order_ops/triton_kernel_wrap.py |  2 +-
 torch/_inductor/config.py                     |  2 +-
 torch/_python_dispatcher.py                   |  4 ++--
 torch/_subclasses/meta_utils.py               |  2 +-
 .../data_sparsifier/data_norm_sparsifier.py   |  2 +-
 .../sparsifier/weight_norm_sparsifier.py      |  2 +-
 torch/ao/quantization/fx/prepare.py           |  2 +-
 torch/csrc/autograd/engine.cpp                |  2 +-
 torch/csrc/autograd/python_variable.cpp       |  8 ++++----
 torch/distributed/elastic/agent/server/api.py |  2 +-
 torch/overrides.py                            |  4 ++--
 torch/profiler/_pattern_matcher.py            |  2 +-
 torch/testing/_internal/common_device_type.py |  2 +-
 torch/utils/_config_module.py                 |  2 +-
 torch/utils/_ordered_set.py                   |  2 +-
 torchgen/_autoheuristic/train_decision.py     |  2 +-
 35 files changed, 64 insertions(+), 64 deletions(-)

diff --git a/aten/src/ATen/core/dispatch/OperatorEntry.cpp b/aten/src/ATen/core/dispatch/OperatorEntry.cpp
index 19c08359b78d..7b775509ba51 100644
--- a/aten/src/ATen/core/dispatch/OperatorEntry.cpp
+++ b/aten/src/ATen/core/dispatch/OperatorEntry.cpp
@@ -353,7 +353,7 @@ std::pair OperatorEntry::computeDispatchTab
   //   CompositExplicitAutogradNonFunctional > CompositeExplicitAutograd > CompositeImplicitAutograd > Autograd
   // Note [CompositeExplicitAutograd and CompositeImplicitAutograd]
   // When there're registrations to both CompositeExplicitAutograd & CompositeImplicitAutograd & Autograd, from (2.2) we know CompositeExplicitAutograd
-  // and Autograd kernels will be picked up and CompositeImplicitAutograd is overriden.
+  // and Autograd kernels will be picked up and CompositeImplicitAutograd is overridden.
   // This is fine and in practice CompositeExplicitAutograd and CompositeImplicitAutograd shouldn't co-exist for an op.
   // TODO: Update alias key precedence after we add new alias keys AutogradDispatchCPUOrCUDA .
diff --git a/benchmarks/tensorexpr/benchmark.py b/benchmarks/tensorexpr/benchmark.py
index 38951ac5091f..f0935aaf0d28 100644
--- a/benchmarks/tensorexpr/benchmark.py
+++ b/benchmarks/tensorexpr/benchmark.py
@@ -34,7 +34,7 @@ class Benchmark:
         for method in dir(self.engine):
             if not callable(getattr(self.engine, method)):
                 continue
-            # don't forward if this function is overriden here
+            # don't forward if this function is overridden here
             if hasattr(self, method):
                 continue
             # don't forward if it is a internal function
diff --git a/c10/core/DispatchKeySet.h b/c10/core/DispatchKeySet.h
index 49dafe1e3cb0..4de19c9ce5bf 100644
--- a/c10/core/DispatchKeySet.h
+++ b/c10/core/DispatchKeySet.h
@@ -115,7 +115,7 @@ C10_ALWAYS_INLINE static const std::
 // Not every backend and not every functionality counts as a "building block
 // key". This is mostly to give us more levers to pull in the design space.
 // Backend keys and functionality keys that count as "building blocks" will
-// contribute to a full cross product of functionality that can be overriden.
+// contribute to a full cross product of functionality that can be overridden.
 //
 // For example, right now we have at least 12 "backend" building
 // blocks (CPU, CUDA, XLA, ...) and at least 5 "functionality"
diff --git a/test/dynamo/test_buffers_override.py b/test/dynamo/test_buffers_override.py
index 946283dc4f19..3ceba631423d 100644
--- a/test/dynamo/test_buffers_override.py
+++ b/test/dynamo/test_buffers_override.py
@@ -30,7 +30,7 @@ class TestBuffersOverride(torch._dynamo.test_case.TestCase):
                 super().__init__()
                 # Override buffers; should not cause breakage
                 # but skip the marking static here since
-                # named_buffers is overriden
+                # named_buffers is overridden
                 self.register_buffer("B", torch.ones(3, 3))
                 self.named_buffers = []
diff --git a/test/dynamo/test_subclasses.py b/test/dynamo/test_subclasses.py
index e9e8b9a95782..b741c6b5b9c4 100644
--- a/test/dynamo/test_subclasses.py
+++ b/test/dynamo/test_subclasses.py
@@ -735,7 +735,7 @@ class SubclassTests(torch._dynamo.test_case.TestCase):
         self.assertEqual(res_exp, res_act)
-    def test_user_overidden_method_unsupported(self):
+    def test_user_overridden_method_unsupported(self):
         class LocalSubclass(torch.Tensor):
             @classmethod
             def __torch_function__(cls, func, types, args=(), kwargs=None):
@@ -755,7 +755,7 @@ class SubclassTests(torch._dynamo.test_case.TestCase):
         self.assertEqual(res_exp, res_act)
-    def test_user_overidden_attr_unsupported(self):
+    def test_user_overridden_attr_unsupported(self):
         class LocalSubclass(torch.Tensor):
             @classmethod
             def __torch_function__(cls, func, types, args=(), kwargs=None):
@@ -769,12 +769,12 @@ class SubclassTests(torch._dynamo.test_case.TestCase):
         def fn(x):
             return x.ndim
-        msg = "`torch.compile` only support tracing certain types of overriden tensor subclass attributes"
+        msg = "`torch.compile` only support tracing certain types of overridden tensor subclass attributes"
         with self.assertRaisesRegex(torch._dynamo.exc.Unsupported, msg):
             x = torch.ones(2, 2).as_subclass(LocalSubclass)
             fn(x)
-    def test_user_overidden_property_unsupported(self):
+    def test_user_overridden_property_unsupported(self):
         class LocalSubclass(torch.Tensor):
             def __init__(self, *args, **kwargs) -> None:
                 self._ndim = 10
@@ -988,8 +988,8 @@ class SubclassTests(torch._dynamo.test_case.TestCase):
         self.assertEqual(x0, x1)
         self.assertEqual(x0.tensor_shape, x1.tensor_shape)
-    def test_subclass_dont_invoke_torch_function_on_overriden_method(self):
-        # We shouldn't fire `__torch_function__` for overriden tensor methods.
+    def test_subclass_dont_invoke_torch_function_on_overridden_method(self):
+        # We shouldn't fire `__torch_function__` for overridden tensor methods.
         class MySubclass(torch.Tensor):
             def to(self, device):
                 return self * len(device)
@@ -1011,10 +1011,10 @@ class SubclassTests(torch._dynamo.test_case.TestCase):
         res_act = fn_opt(x)
         self.assertEqual(res_exp, res_act)
-    def test_subclass_dont_invoke_torch_function_on_overriden_attr(self):
+    def test_subclass_dont_invoke_torch_function_on_overridden_attr(self):
         from types import MethodWrapperType
-        # We shouldn't fire `__torch_function__` for overriden tensor attrs.
+        # We shouldn't fire `__torch_function__` for overridden tensor attrs.
         class MySubclass(torch.Tensor):
             def ndim(self):
                 return 42
@@ -1204,7 +1204,7 @@ class SubclassTests(torch._dynamo.test_case.TestCase):
     def test_nontraceable_tensor_subclass(self):
         # This will error if Dynamo tries to wrap it as a tensor variable,
         # because that involves calling certain methods to inspect the tensor
-        # property, which will blow up in the overriden `__torch_function__`.
+        # property, which will blow up in the overridden `__torch_function__`.
         class MySubclass(torch.Tensor):
             @classmethod
             def __torch_function__(cls, func, types, args=(), kwargs=None):
diff --git a/test/dynamo/test_unspec.py b/test/dynamo/test_unspec.py
index ecba213ebddb..01c076cb9d90 100644
--- a/test/dynamo/test_unspec.py
+++ b/test/dynamo/test_unspec.py
@@ -266,7 +266,7 @@ class UnspecTests(torch._dynamo.test_case.TestCase):
         self.assertEqual(rand2_1.getstate(), rand2_2.getstate())
         self.assertEqual(rand3_1.getstate(), rand3_2.getstate())
-    def test_random_object_overriden_methods(self):
+    def test_random_object_overridden_methods(self):
         # these will result in graph breaks, but we shouldn't crash
         def get_rng():
             rand1 = random.Random(1)
diff --git a/test/inductor/test_aot_inductor_package.py b/test/inductor/test_aot_inductor_package.py
index 0f5ce7d633b3..aa2569a9b543 100644
--- a/test/inductor/test_aot_inductor_package.py
+++ b/test/inductor/test_aot_inductor_package.py
@@ -136,7 +136,7 @@ class TestAOTInductorPackage(TestCase):
     def test_remove_intermediate_files(self):
         # For CUDA, generated cpp files contain absolute path to the generated cubin files.
-        # With the package artifact, that cubin path should be overriden at the run time,
+        # With the package artifact, that cubin path should be overridden at the run time,
         # so removing those intermeidate files in this test to verify that.
         class Model(torch.nn.Module):
             def forward(self, x, y):
diff --git a/test/nn/test_lazy_modules.py b/test/nn/test_lazy_modules.py
index d64020c2dcc6..6cc78cbfc51a 100644
--- a/test/nn/test_lazy_modules.py
+++ b/test/nn/test_lazy_modules.py
@@ -33,7 +33,7 @@ class TestLazyModules(TestCase):
         new_module.register_parameter("test_param", nn.Parameter(torch.ones(5, 5)))
         with self.assertRaisesRegex(RuntimeError, "shape of an uninitialized"):
             new_module.load_state_dict(state_dict)
-        # Uninitialized parameters are overriden when the state dict to be loaded contains a valid one
+        # Uninitialized parameters are overridden when the state dict to be loaded contains a valid one
         new_module = LazyModule()
         new_module.register_parameter("test_param", nn.Parameter(torch.ones(5, 5)))
         module.load_state_dict(new_module.state_dict())
@@ -62,7 +62,7 @@ class TestLazyModules(TestCase):
         new_module.test_buffer = Buffer(torch.ones(5, 5))
         with self.assertRaisesRegex(RuntimeError, "shape of an uninitialized"):
             new_module.load_state_dict(state_dict)
-        # Uninitialized parameters are overriden when the state dict to be loaded contains a valid one
+        # Uninitialized parameters are overridden when the state dict to be loaded contains a valid one
         new_module = LazyModule()
         new_module.test_buffer = Buffer(torch.ones(5, 5))
         module.load_state_dict(new_module.state_dict())
diff --git a/test/onnx/internal/test_registraion.py b/test/onnx/internal/test_registraion.py
index 39afcc24ee65..e357dbff713a 100644
--- a/test/onnx/internal/test_registraion.py
+++ b/test/onnx/internal/test_registraion.py
@@ -144,7 +144,7 @@ class TestOverrideDict(common_utils.TestCase):
         self.assertEqual(len(self.override_dict), 0)
         self.assertNotIn("a", self.override_dict)
-    def test_overriden_key_precededs_base_key_regardless_of_insert_order(self):
+    def test_overridden_key_precedes_base_key_regardless_of_insert_order(self):
         self.override_dict.set_base("a", 42)
         self.override_dict.override("a", 100)
         self.override_dict.set_base("a", 0)
diff --git a/test/quantization/pt2e/test_quantize_pt2e.py b/test/quantization/pt2e/test_quantize_pt2e.py
index 3f9fde0444a0..9ffb63028ff2 100644
--- a/test/quantization/pt2e/test_quantize_pt2e.py
+++ b/test/quantization/pt2e/test_quantize_pt2e.py
@@ -2552,7 +2552,7 @@ class TestQuantizePT2E(PT2EQuantizationTestCase):
                 torch.ops.quantized_decomposed.quantize_per_tensor.default,
                 torch.ops.quantized_decomposed.dequantize_per_tensor.default,
             ):
-                # Entire graph share the same qspec which was overriden by FixedQParamsObserver
+                # Entire graph share the same qspec which was overridden by FixedQParamsObserver
                 self.assertEqual(n.args[1], 0.125)
                 self.assertEqual(n.args[2], 42)
diff --git a/test/test_cpp_extensions_aot.py b/test/test_cpp_extensions_aot.py
index a8a93f14996b..8bca2264d002 100644
--- a/test/test_cpp_extensions_aot.py
+++ b/test/test_cpp_extensions_aot.py
@@ -317,13 +317,13 @@ class TestMAIATensor(common.TestCase):
         weight = torch.empty(6, 4, 2, 2, device="maia", requires_grad=True)
         bias = torch.empty(6, device="maia")
-        # Make sure forward is overriden
+        # Make sure forward is overridden
         out = torch.nn.functional.conv2d(input, weight, bias, 2, 0, 1, 1)
         self.assertEqual(maia_extension.get_test_int(), 2)
         self.assertEqual(out.shape[0], input.shape[0])
         self.assertEqual(out.shape[1], weight.shape[0])
-        # Make sure backward is overriden
+        # Make sure backward is overridden
         # Double backward is dispatched to _convolution_double_backward.
         # It is not tested here as it involves more computation/overrides.
         grad = torch.autograd.grad(out, input, out, create_graph=True)
diff --git a/test/test_dispatch.py b/test/test_dispatch.py
index 0e77c31915e5..046faea9c484 100644
--- a/test/test_dispatch.py
+++ b/test/test_dispatch.py
@@ -1118,7 +1118,7 @@ CompositeImplicitAutograd[alias] fn_CompositeImplicitAutograd
     def test_duplicate_registrations(self):
         dispatcher = PythonDispatcher()
-        with self.assertRaisesRegex(RuntimeError, r"Overriden is not allowed"):
+        with self.assertRaisesRegex(RuntimeError, r"Overridden is not allowed"):
             dispatcher.register(["CPU", "CPU"])
     def test_defaultbackend_math(self):
diff --git a/test/test_overrides.py b/test/test_overrides.py
index fc47b72bfbce..8575bb90271c 100644
--- a/test/test_overrides.py
+++ b/test/test_overrides.py
@@ -368,7 +368,7 @@ class TensorLike:
     """A class that overrides the full torch API
     This class is used to explicitly test that the full torch.tensor API
-    can be overriden with a class that defines __torch_function__.
+    can be overridden with a class that defines __torch_function__.
     """
     @classmethod
     def __torch_function__(cls, func, types, args=(), kwargs=None):
diff --git a/test/test_schema_check.py b/test/test_schema_check.py
index 9e1d6a6f1250..29ea36fd8a5f 100644
--- a/test/test_schema_check.py
+++ b/test/test_schema_check.py
@@ -232,7 +232,7 @@ class TestSchemaCheck(JitTestCase):
         actual = x.relu().sin()
         self.assertEqual(expected, actual)
-    # Tests that SchemaCheckMode wraps torch.Tensor when an argument's default is overriden
+    # Tests that SchemaCheckMode wraps torch.Tensor when an argument's default is overridden
    def test_schema_check_mode_functionality_default_replaced(self):
         x = torch.rand((3, 3), requires_grad=True)
         expected = x.add(x, alpha=2)
diff --git a/tools/dynamo/graph_break_registry.json b/tools/dynamo/graph_break_registry.json
index 7a59a0832370..ac0dabe7a556 100644
--- a/tools/dynamo/graph_break_registry.json
+++ b/tools/dynamo/graph_break_registry.json
@@ -999,7 +999,7 @@
   ],
   "GB0103": [
     {
-      "Gb_type": "Tensor subclass overriden method call",
+      "Gb_type": "Tensor subclass overridden method call",
       "Context": "{name}",
       "Explanation": "`torch.compile` currently can't trace this",
       "Hints": [
@@ -1601,9 +1601,9 @@
   ],
   "GB0164": [
     {
-      "Gb_type": "Unsupported tensor subclass overriden attribute access",
+      "Gb_type": "Unsupported tensor subclass overridden attribute access",
       "Context": "{name}",
-      "Explanation": "`torch.compile` only support tracing certain types of overriden tensor subclass attributes",
+      "Explanation": "`torch.compile` only support tracing certain types of overridden tensor subclass attributes",
       "Hints": [
         "Avoid accessing {name} of tensor subclass in torch.compile region",
         "Renaming attribute `{name}` of type {self.class_type}",
diff --git a/torch/_dynamo/pgo.py b/torch/_dynamo/pgo.py
index af1ac18a43ce..5ca7c641b40a 100644
--- a/torch/_dynamo/pgo.py
+++ b/torch/_dynamo/pgo.py
@@ -637,7 +637,7 @@ class PGOCacheArtifact(CacheArtifact):
         update the key to use the new MAST job's name and version.
""" if not original_key.startswith("mast:"): - # if original_key is overriden, then dont change it + # if original_key is overridden, then dont change it return original_key if (new_key := get_cache_key()) is not None: return new_key diff --git a/torch/_dynamo/variables/misc.py b/torch/_dynamo/variables/misc.py index 44a4e95d03c1..c2e765fdf305 100644 --- a/torch/_dynamo/variables/misc.py +++ b/torch/_dynamo/variables/misc.py @@ -1758,7 +1758,7 @@ class RandomVariable(VariableTracker): """random.Random() Implemented by wrapping a VariableTracker around a random.Random object. - The supported methods for the random.Random object cannot be overriden. + The supported methods for the random.Random object cannot be overridden. Assumes that random objects behave the same given a set seed or state. """ diff --git a/torch/_dynamo/variables/torch_function.py b/torch/_dynamo/variables/torch_function.py index 26e3ee8aa0c9..9840275ec718 100644 --- a/torch/_dynamo/variables/torch_function.py +++ b/torch/_dynamo/variables/torch_function.py @@ -487,7 +487,7 @@ def _get_subclass_type_var(tx: "InstructionTranslator", var): return VariableTracker.build(tx, var.python_type(), source) -def _is_attr_overidden(tx: "InstructionTranslator", var, name): +def _is_attr_overridden(tx: "InstructionTranslator", var, name): import torch overridden = False @@ -640,11 +640,11 @@ class TensorWithTFOverrideVariable(TensorVariable): ], ) - # Handle non-overriden attributes inherited from `torch.Tensor`. - attr_is_overriden = _is_attr_overidden(tx, self, name) + # Handle non-overridden attributes inherited from `torch.Tensor`. + attr_is_overridden = _is_attr_overridden(tx, self, name) if ( hasattr(torch.Tensor, name) - and not attr_is_overriden + and not attr_is_overridden and not inspect.ismethoddescriptor(getattr(torch.Tensor, name)) ): args, kwargs = [self], {} @@ -694,11 +694,11 @@ class TensorWithTFOverrideVariable(TensorVariable): attr.__func__, self.class_type_var(tx), source=attr_source ) - elif attr_is_overriden: + elif attr_is_overridden: unimplemented_v2( - gb_type="Unsupported tensor subclass overriden attribute access", + gb_type="Unsupported tensor subclass overridden attribute access", context=f"{name}", - explanation="`torch.compile` only support tracing certain types of overriden tensor subclass attributes", + explanation="`torch.compile` only support tracing certain types of overridden tensor subclass attributes", hints=[ f"Avoid accessing {name} of tensor subclass in torch.compile region", f"Renaming attribute `{name}` of type {self.class_type}", @@ -735,9 +735,9 @@ class TensorWithTFOverrideVariable(TensorVariable): if can_dispatch_torch_function(tx, tf_args, kwargs): import torch - if _is_attr_overidden(tx, self, name): + if _is_attr_overridden(tx, self, name): unimplemented_v2( - gb_type="Tensor subclass overriden method call", + gb_type="Tensor subclass overridden method call", context=f"{name}", explanation="`torch.compile` currently can't trace this", hints=[ diff --git a/torch/_functorch/autograd_function.py b/torch/_functorch/autograd_function.py index bc715c44ed85..c29f52fe6ba9 100644 --- a/torch/_functorch/autograd_function.py +++ b/torch/_functorch/autograd_function.py @@ -258,7 +258,7 @@ class VmapInfo(NamedTuple): randomness: str -def has_overriden_vmap_rule(autograd_function): +def has_overridden_vmap_rule(autograd_function): return autograd_function.vmap is not torch.autograd.Function.vmap @@ -286,14 +286,14 @@ def custom_function_call_vmap(interpreter, autograd_function, *operands, **kwarg ) 
     if autograd_function.generate_vmap_rule:
-        if has_overriden_vmap_rule(autograd_function):
+        if has_overridden_vmap_rule(autograd_function):
             # TODO: Update link to stable once that's out
             # https://github.com/pytorch/pytorch/issues/92029
             raise RuntimeError(
                 f"You tried to vmap over {autograd_function.__name__}, but "
-                f"it has both generate_vmap_rule=True and an overriden vmap "
+                f"it has both generate_vmap_rule=True and an overridden vmap "
                 f"staticmethod. Please set generate_vmap_rule=False or delete "
-                f"the overriden vmap staticmethod to avoid ambiguity. "
+                f"the overridden vmap staticmethod to avoid ambiguity. "
                 f"For more details, please see "
                 f"https://pytorch.org/docs/main/notes/extending.func.html"
             )
@@ -301,7 +301,7 @@ def custom_function_call_vmap(interpreter, autograd_function, *operands, **kwarg
             interpreter, autograd_function, *operands
         )
-    if not has_overriden_vmap_rule(autograd_function):
+    if not has_overridden_vmap_rule(autograd_function):
         # TODO: Update link to stable once that's out
         # https://github.com/pytorch/pytorch/issues/92029
         raise RuntimeError(
diff --git a/torch/_higher_order_ops/triton_kernel_wrap.py b/torch/_higher_order_ops/triton_kernel_wrap.py
index 465f0f89c53b..b3342f115dae 100644
--- a/torch/_higher_order_ops/triton_kernel_wrap.py
+++ b/torch/_higher_order_ops/triton_kernel_wrap.py
@@ -1227,7 +1227,7 @@ class TritonHOPifier:
    to the HOP (which can then be traced).
    Because Dynamo has its own calling conventions for e.g. invoking a user-defined function
-   TritonHOPifier is an abstract class that can be overriden by its subclasses.
+   TritonHOPifier is an abstract class that can be overridden by its subclasses.
    """
    def raise_unsupported(self, msg: str) -> Never:
diff --git a/torch/_inductor/config.py b/torch/_inductor/config.py
index 26ed496130a2..9160cc450c99 100644
--- a/torch/_inductor/config.py
+++ b/torch/_inductor/config.py
@@ -1647,7 +1647,7 @@ class trace:
     compile_profile = False
     # Upload the .tar.gz file
-    # Needs to be overriden based on specific environment needs
+    # Needs to be overridden based on specific environment needs
     upload_tar: Optional[Callable[[str], None]] = None
     log_autotuning_results: bool = False
diff --git a/torch/_python_dispatcher.py b/torch/_python_dispatcher.py
index 2dfdbb296a4b..d2d4fbbf621e 100644
--- a/torch/_python_dispatcher.py
+++ b/torch/_python_dispatcher.py
@@ -92,10 +92,10 @@ class PythonDispatcher:
     """
     def register(self, dispatchKeys):
-        # Overriden is not supported and triggers a warning in C++ dispatcher.
+        # Overridden is not supported and triggers a warning in C++ dispatcher.
         if len(set(dispatchKeys)) != len(dispatchKeys):
             raise RuntimeError(
-                f"Overriden is not allowed but found duplicates in {dispatchKeys}."
+                f"Overridden is not allowed but found duplicates in {dispatchKeys}."
             )
         # We currently forbid this in codegen instead of C++ dispatcher.
         if (
diff --git a/torch/_subclasses/meta_utils.py b/torch/_subclasses/meta_utils.py
index 6dc03369cc90..0c5ad5b16511 100644
--- a/torch/_subclasses/meta_utils.py
+++ b/torch/_subclasses/meta_utils.py
@@ -385,7 +385,7 @@ class MetaTensorDescriber:
             is_leaf=is_leaf,
             requires_grad=t.requires_grad,
             # NB: ndim should be OK too but there is a disaster at
-            # python test/dynamo/test_subclasses.py -k test_user_overidden_property_unsupported
+            # python test/dynamo/test_subclasses.py -k test_user_overridden_property_unsupported
             # Actually, this means that we have a little bit of a problem
             # here, which is that there is some sensitivity to how exactly an
             # access is done if you have a __torch_function__ subclass. Maybe
diff --git a/torch/ao/pruning/_experimental/data_sparsifier/data_norm_sparsifier.py b/torch/ao/pruning/_experimental/data_sparsifier/data_norm_sparsifier.py
index c57b639af82e..ff4b4f913f50 100644
--- a/torch/ao/pruning/_experimental/data_sparsifier/data_norm_sparsifier.py
+++ b/torch/ao/pruning/_experimental/data_sparsifier/data_norm_sparsifier.py
@@ -32,7 +32,7 @@ class DataNormSparsifier(BaseDataSparsifier):
         zeros_per_block: Number of zeros in a sparse block
     Note::
         All arguments to the DataNormSparsifier constructor are "default"
-        arguments and could be overriden by the configuration provided in the
+        arguments and could be overridden by the configuration provided in the
         `add_data` step.
     """
diff --git a/torch/ao/pruning/sparsifier/weight_norm_sparsifier.py b/torch/ao/pruning/sparsifier/weight_norm_sparsifier.py
index 58c0f7efa37d..89c707ad33e6 100644
--- a/torch/ao/pruning/sparsifier/weight_norm_sparsifier.py
+++ b/torch/ao/pruning/sparsifier/weight_norm_sparsifier.py
@@ -52,7 +52,7 @@ class WeightNormSparsifier(BaseSparsifier):
     Note::
         All arguments to the WeightNormSparsifier constructor are "default"
-        arguments and could be overriden by the configuration provided in the
+        arguments and could be overridden by the configuration provided in the
         `prepare` step.
     """
diff --git a/torch/ao/quantization/fx/prepare.py b/torch/ao/quantization/fx/prepare.py
index e6fb3cda3bcf..4860ee39a7e8 100644
--- a/torch/ao/quantization/fx/prepare.py
+++ b/torch/ao/quantization/fx/prepare.py
@@ -1502,7 +1502,7 @@ def insert_observers_for_model(
     # first, populate the dtype map based only on qconfig and qhandler
     # this assumes:
-    # graph inputs are fp32 by default, and int8 where overriden
+    # graph inputs are fp32 by default, and int8 where overridden
     # other nodes output dtype is specified by the qconfig
     named_modules = dict(model.named_modules(remove_duplicate=False))
diff --git a/torch/csrc/autograd/engine.cpp b/torch/csrc/autograd/engine.cpp
index 8b5d0536df0e..6f5df7de1103 100644
--- a/torch/csrc/autograd/engine.cpp
+++ b/torch/csrc/autograd/engine.cpp
@@ -1471,7 +1471,7 @@ c10::intrusive_ptr Engine::execute_with_graph_task(
   return graph_task->future_result_;
 }
-// note that when python is present, this base engine will be overriden
+// note that when python is present, this base engine will be overridden
 // with a PythonEngine. Because this typically happens before get_default_engine
 // is called, this base engine will never be created.
Engine& Engine::get_base_engine() { diff --git a/torch/csrc/autograd/python_variable.cpp b/torch/csrc/autograd/python_variable.cpp index af31d5911f6e..fc8285a069b2 100644 --- a/torch/csrc/autograd/python_variable.cpp +++ b/torch/csrc/autograd/python_variable.cpp @@ -238,7 +238,7 @@ void registerPythonTensorClass( c10::Device dev(device); TORCH_CHECK( - dev.type() == kXLA, "Only the python class for XLA can be overriden"); + dev.type() == kXLA, "Only the python class for XLA can be overridden"); if (device_to_py_class_[static_cast(dev.type())] != nullptr) { TORCH_WARN( "Overriding a previously registered python class for ", dev.str()); @@ -409,13 +409,13 @@ static bool THPVariable_tryResurrect(THPVariable* self) { static int THPFake_traverse(THPVariable* self, visitproc visit, void* arg) { TORCH_INTERNAL_ASSERT( - false, "TensorBase tp_traverse function was not overriden properly"); + false, "TensorBase tp_traverse function was not overridden properly"); return 0; } static int THPFake_clear(THPVariable* self) { TORCH_INTERNAL_ASSERT( - false, "TensorBase tp_clear function was not overriden properly"); + false, "TensorBase tp_clear function was not overridden properly"); return 0; } @@ -2330,7 +2330,7 @@ int THPVariableMetaType_init(PyObject* cls, PyObject* args, PyObject* kwargs) { if (PyType_Type.tp_init(cls, args, kwargs) < 0) { return -1; } - // It is important for all three of these to be overriden correctly for the + // It is important for all three of these to be overridden correctly for the // resurrection checks to properly happen. In particular, an older version // was not overriding tp_clear here. This lead to the default subtype_clear // running on the Tensor object (as only TensorBase tp_clear was custom), diff --git a/torch/distributed/elastic/agent/server/api.py b/torch/distributed/elastic/agent/server/api.py index 88aebe078ad5..00ebfeee74ff 100644 --- a/torch/distributed/elastic/agent/server/api.py +++ b/torch/distributed/elastic/agent/server/api.py @@ -500,7 +500,7 @@ class SimpleElasticAgent(ElasticAgent): group_rank = rdzv_info.rank group_world_size = rdzv_info.world_size - # master_addr/master_port could be explicitly overriden + # master_addr/master_port could be explicitly overridden # TODO: BC - specific to static rdzv and can be simplifed further master_addr = spec.master_addr or rdzv_info.bootstrap_store_info.master_addr master_port = spec.master_port or rdzv_info.bootstrap_store_info.master_port diff --git a/torch/overrides.py b/torch/overrides.py index f4edecd664b1..7f89475a164d 100644 --- a/torch/overrides.py +++ b/torch/overrides.py @@ -424,7 +424,7 @@ def get_testing_overrides() -> dict[Callable, Callable]: >>> inspect.signature(my_add) """ - # Every function in the PyTorchAPI that can be overriden needs an entry + # Every function in the PyTorchAPI that can be overridden needs an entry # in this dict. 
     #
     # Optimally we would use inspect to get the function signature and define
@@ -1881,7 +1881,7 @@ def _get_overridable_functions() -> tuple[
             if ignore:
                 continue
-            # cannot be overriden by __torch_function__
+            # cannot be overridden by __torch_function__
             if func in get_ignored_functions():
                 msg = (
                     "{}.{} is in the tuple returned by torch._overrides.get_ignored_functions "
diff --git a/torch/profiler/_pattern_matcher.py b/torch/profiler/_pattern_matcher.py
index 41748ea39545..ba522e54f329 100644
--- a/torch/profiler/_pattern_matcher.py
+++ b/torch/profiler/_pattern_matcher.py
@@ -90,7 +90,7 @@ class Pattern:
     def match(self, event: _ProfilerEvent):
         """
         Return True if the event matches the pattern.
-        This method should be overriden in subclass.
+        This method should be overridden in subclass.
         """
         raise NotImplementedError
diff --git a/torch/testing/_internal/common_device_type.py b/torch/testing/_internal/common_device_type.py
index b74972b00dd2..03ac82ce50c6 100644
--- a/torch/testing/_internal/common_device_type.py
+++ b/torch/testing/_internal/common_device_type.py
@@ -1320,7 +1320,7 @@ def largeTensorTest(size, device=None, inductor=TEST_WITH_TORCHINDUCTOR):
    size may be a number of bytes, a string of the form "N GB", or a callable
    If the test is a device generic test, available memory on the primary device will be checked.
-   It can also be overriden by the optional `device=` argument.
+   It can also be overridden by the optional `device=` argument.
    In other tests, the `device=` argument needs to be specified.
    """
    if isinstance(size, str):
diff --git a/torch/utils/_config_module.py b/torch/utils/_config_module.py
index 01a421f53084..d04af1fb2718 100644
--- a/torch/utils/_config_module.py
+++ b/torch/utils/_config_module.py
@@ -439,7 +439,7 @@ class ConfigModule(ModuleType):
     def _is_default(self, name: str) -> bool:
         """
         Returns true if the config is at its default value.
-        configs overriden by the env are not considered default.
+        configs overridden by the env are not considered default.
         """
         config_val = self._config[name]
         # The config is not overridden by the user, and the env_value_default
diff --git a/torch/utils/_ordered_set.py b/torch/utils/_ordered_set.py
index 29373289c426..2bead0e00b12 100644
--- a/torch/utils/_ordered_set.py
+++ b/torch/utils/_ordered_set.py
@@ -33,7 +33,7 @@ class OrderedSet(MutableSet[T], Reversible[T]):
         return s
     #
-    # Required overriden abstract methods
+    # Required overridden abstract methods
     #
     def __contains__(self, elem: object) -> bool:
         return elem in self._dict
diff --git a/torchgen/_autoheuristic/train_decision.py b/torchgen/_autoheuristic/train_decision.py
index f27a30b48fb5..932baf16e845 100644
--- a/torchgen/_autoheuristic/train_decision.py
+++ b/torchgen/_autoheuristic/train_decision.py
@@ -94,7 +94,7 @@ class AHTrainDecisionTree(AHTrain):
     def get_grid_search_values(self):
         """
-        Standard values for grid search. Can be overriden.
+        Standard values for grid search. Can be overridden.
         """
         return {
             "max_depth": [5, 6, 7],