Mirror of https://github.com/pytorch/pytorch.git (synced 2025-10-20 12:54:11 +08:00)
Typo fixes for "overridden" in comments and function names (#155944)
This word appears often in class descriptions and is not consistently spelled. Update comments and some function names to use the correct spelling consistently. Facilitates searching the codebase.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/155944
Approved by: https://github.com/Skylion007
Committed by: PyTorch MergeBot
Parent: ca3cabd24a
Commit: 297805fd8f
@@ -353,7 +353,7 @@ std::pair<const AnnotatedKernel&, const char*> OperatorEntry::computeDispatchTab
 // CompositExplicitAutogradNonFunctional > CompositeExplicitAutograd > CompositeImplicitAutograd > Autograd
 // Note [CompositeExplicitAutograd and CompositeImplicitAutograd]
 // When there're registrations to both CompositeExplicitAutograd & CompositeImplicitAutograd & Autograd, from (2.2) we know CompositeExplicitAutograd
-// and Autograd kernels will be picked up and CompositeImplicitAutograd is overriden.
+// and Autograd kernels will be picked up and CompositeImplicitAutograd is overridden.
 // This is fine and in practice CompositeExplicitAutograd and CompositeImplicitAutograd shouldn't co-exist for an op.
 // TODO: Update alias key precedence after we add new alias keys AutogradDispatchCPUOrCUDA .
@@ -34,7 +34,7 @@ class Benchmark:
 for method in dir(self.engine):
 if not callable(getattr(self.engine, method)):
 continue
-# don't forward if this function is overriden here
+# don't forward if this function is overridden here
 if hasattr(self, method):
 continue
 # don't forward if it is a internal function
@@ -115,7 +115,7 @@ C10_ALWAYS_INLINE static const std::
 // Not every backend and not every functionality counts as a "building block
 // key". This is mostly to give us more levers to pull in the design space.
 // Backend keys and functionality keys that count as "building blocks" will
-// contribute to a full cross product of functionality that can be overriden.
+// contribute to a full cross product of functionality that can be overridden.
 //
 // For example, right now we have at least 12 "backend" building
 // blocks (CPU, CUDA, XLA, ...) and at least 5 "functionality"
@@ -30,7 +30,7 @@ class TestBuffersOverride(torch._dynamo.test_case.TestCase):
 super().__init__()
 # Override buffers; should not cause breakage
 # but skip the marking static here since
-# named_buffers is overriden
+# named_buffers is overridden
 self.register_buffer("B", torch.ones(3, 3))
 self.named_buffers = []
@@ -735,7 +735,7 @@ class SubclassTests(torch._dynamo.test_case.TestCase):
 self.assertEqual(res_exp, res_act)

-def test_user_overidden_method_unsupported(self):
+def test_user_overridden_method_unsupported(self):
 class LocalSubclass(torch.Tensor):
 @classmethod
 def __torch_function__(cls, func, types, args=(), kwargs=None):
@@ -755,7 +755,7 @@ class SubclassTests(torch._dynamo.test_case.TestCase):
 self.assertEqual(res_exp, res_act)

-def test_user_overidden_attr_unsupported(self):
+def test_user_overridden_attr_unsupported(self):
 class LocalSubclass(torch.Tensor):
 @classmethod
 def __torch_function__(cls, func, types, args=(), kwargs=None):
@@ -769,12 +769,12 @@ class SubclassTests(torch._dynamo.test_case.TestCase):
 def fn(x):
 return x.ndim

-msg = "`torch.compile` only support tracing certain types of overriden tensor subclass attributes"
+msg = "`torch.compile` only support tracing certain types of overridden tensor subclass attributes"
 with self.assertRaisesRegex(torch._dynamo.exc.Unsupported, msg):
 x = torch.ones(2, 2).as_subclass(LocalSubclass)
 fn(x)

-def test_user_overidden_property_unsupported(self):
+def test_user_overridden_property_unsupported(self):
 class LocalSubclass(torch.Tensor):
 def __init__(self, *args, **kwargs) -> None:
 self._ndim = 10
@@ -988,8 +988,8 @@ class SubclassTests(torch._dynamo.test_case.TestCase):
 self.assertEqual(x0, x1)
 self.assertEqual(x0.tensor_shape, x1.tensor_shape)

-def test_subclass_dont_invoke_torch_function_on_overriden_method(self):
-# We shouldn't fire `__torch_function__` for overriden tensor methods.
+def test_subclass_dont_invoke_torch_function_on_overridden_method(self):
+# We shouldn't fire `__torch_function__` for overridden tensor methods.
 class MySubclass(torch.Tensor):
 def to(self, device):
 return self * len(device)
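For context, the eager behavior this test pins down can be reproduced outside the test suite. The sketch below mirrors the `MySubclass` defined in the hunk above; calling the overridden `to` runs the subclass body directly instead of routing through `__torch_function__` (comments and the printed values are illustrative, not taken from the PR):

    import torch

    class MySubclass(torch.Tensor):
        def to(self, device):
            # Overridden tensor method: called directly; __torch_function__
            # should not fire for it.
            return self * len(device)

    x = torch.ones(2).as_subclass(MySubclass)
    print(x.to("cpu"))  # elementwise 1 * len("cpu") == 3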
@@ -1011,10 +1011,10 @@ class SubclassTests(torch._dynamo.test_case.TestCase):
 res_act = fn_opt(x)
 self.assertEqual(res_exp, res_act)

-def test_subclass_dont_invoke_torch_function_on_overriden_attr(self):
+def test_subclass_dont_invoke_torch_function_on_overridden_attr(self):
 from types import MethodWrapperType

-# We shouldn't fire `__torch_function__` for overriden tensor attrs.
+# We shouldn't fire `__torch_function__` for overridden tensor attrs.
 class MySubclass(torch.Tensor):
 def ndim(self):
 return 42
@@ -1204,7 +1204,7 @@ class SubclassTests(torch._dynamo.test_case.TestCase):
 def test_nontraceable_tensor_subclass(self):
 # This will error if Dynamo tries to wrap it as a tensor variable,
 # because that involves calling certain methods to inspect the tensor
-# property, which will blow up in the overriden `__torch_function__`.
+# property, which will blow up in the overridden `__torch_function__`.
 class MySubclass(torch.Tensor):
 @classmethod
 def __torch_function__(cls, func, types, args=(), kwargs=None):
@@ -266,7 +266,7 @@ class UnspecTests(torch._dynamo.test_case.TestCase):
 self.assertEqual(rand2_1.getstate(), rand2_2.getstate())
 self.assertEqual(rand3_1.getstate(), rand3_2.getstate())

-def test_random_object_overriden_methods(self):
+def test_random_object_overridden_methods(self):
 # these will result in graph breaks, but we shouldn't crash
 def get_rng():
 rand1 = random.Random(1)
@@ -136,7 +136,7 @@ class TestAOTInductorPackage(TestCase):

 def test_remove_intermediate_files(self):
 # For CUDA, generated cpp files contain absolute path to the generated cubin files.
-# With the package artifact, that cubin path should be overriden at the run time,
+# With the package artifact, that cubin path should be overridden at the run time,
 # so removing those intermeidate files in this test to verify that.
 class Model(torch.nn.Module):
 def forward(self, x, y):
@@ -33,7 +33,7 @@ class TestLazyModules(TestCase):
 new_module.register_parameter("test_param", nn.Parameter(torch.ones(5, 5)))
 with self.assertRaisesRegex(RuntimeError, "shape of an uninitialized"):
 new_module.load_state_dict(state_dict)
-# Uninitialized parameters are overriden when the state dict to be loaded contains a valid one
+# Uninitialized parameters are overridden when the state dict to be loaded contains a valid one
 new_module = LazyModule()
 new_module.register_parameter("test_param", nn.Parameter(torch.ones(5, 5)))
 module.load_state_dict(new_module.state_dict())
@@ -62,7 +62,7 @@ class TestLazyModules(TestCase):
 new_module.test_buffer = Buffer(torch.ones(5, 5))
 with self.assertRaisesRegex(RuntimeError, "shape of an uninitialized"):
 new_module.load_state_dict(state_dict)
-# Uninitialized parameters are overriden when the state dict to be loaded contains a valid one
+# Uninitialized parameters are overridden when the state dict to be loaded contains a valid one
 new_module = LazyModule()
 new_module.test_buffer = Buffer(torch.ones(5, 5))
 module.load_state_dict(new_module.state_dict())
@@ -144,7 +144,7 @@ class TestOverrideDict(common_utils.TestCase):
 self.assertEqual(len(self.override_dict), 0)
 self.assertNotIn("a", self.override_dict)

-def test_overriden_key_precededs_base_key_regardless_of_insert_order(self):
+def test_overridden_key_precedes_base_key_regardless_of_insert_order(self):
 self.override_dict.set_base("a", 42)
 self.override_dict.override("a", 100)
 self.override_dict.set_base("a", 0)
@@ -2552,7 +2552,7 @@ class TestQuantizePT2E(PT2EQuantizationTestCase):
 torch.ops.quantized_decomposed.quantize_per_tensor.default,
 torch.ops.quantized_decomposed.dequantize_per_tensor.default,
 ):
-# Entire graph share the same qspec which was overriden by FixedQParamsObserver
+# Entire graph share the same qspec which was overridden by FixedQParamsObserver
 self.assertEqual(n.args[1], 0.125)
 self.assertEqual(n.args[2], 42)
@@ -317,13 +317,13 @@ class TestMAIATensor(common.TestCase):
 weight = torch.empty(6, 4, 2, 2, device="maia", requires_grad=True)
 bias = torch.empty(6, device="maia")

-# Make sure forward is overriden
+# Make sure forward is overridden
 out = torch.nn.functional.conv2d(input, weight, bias, 2, 0, 1, 1)
 self.assertEqual(maia_extension.get_test_int(), 2)
 self.assertEqual(out.shape[0], input.shape[0])
 self.assertEqual(out.shape[1], weight.shape[0])

-# Make sure backward is overriden
+# Make sure backward is overridden
 # Double backward is dispatched to _convolution_double_backward.
 # It is not tested here as it involves more computation/overrides.
 grad = torch.autograd.grad(out, input, out, create_graph=True)
@@ -1118,7 +1118,7 @@ CompositeImplicitAutograd[alias] fn_CompositeImplicitAutograd
 def test_duplicate_registrations(self):
 dispatcher = PythonDispatcher()

-with self.assertRaisesRegex(RuntimeError, r"Overriden is not allowed"):
+with self.assertRaisesRegex(RuntimeError, r"Overridden is not allowed"):
 dispatcher.register(["CPU", "CPU"])

 def test_defaultbackend_math(self):
@@ -368,7 +368,7 @@ class TensorLike:
 """A class that overrides the full torch API

 This class is used to explicitly test that the full torch.tensor API
-can be overriden with a class that defines __torch_function__.
+can be overridden with a class that defines __torch_function__.
 """
 @classmethod
 def __torch_function__(cls, func, types, args=(), kwargs=None):
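The pattern this test class exercises, a plain (non-Tensor) object taking over torch calls via `__torch_function__`, can be sketched as follows. `Interceptor` is a hypothetical stand-in for the test's `TensorLike`, and the returned strings are illustrative:

    import torch

    class Interceptor:
        # Any class defining __torch_function__ participates in the torch
        # function protocol; every overridable torch.* call on it lands here.
        @classmethod
        def __torch_function__(cls, func, types, args=(), kwargs=None):
            if kwargs is None:
                kwargs = {}
            return f"intercepted {func.__name__}"

    t = Interceptor()
    print(torch.add(t, t))   # intercepted add
    print(torch.mean(t))     # intercepted mean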
@@ -232,7 +232,7 @@ class TestSchemaCheck(JitTestCase):
 actual = x.relu().sin()
 self.assertEqual(expected, actual)

-# Tests that SchemaCheckMode wraps torch.Tensor when an argument's default is overriden
+# Tests that SchemaCheckMode wraps torch.Tensor when an argument's default is overridden
 def test_schema_check_mode_functionality_default_replaced(self):
 x = torch.rand((3, 3), requires_grad=True)
 expected = x.add(x, alpha=2)
@@ -999,7 +999,7 @@
 ],
 "GB0103": [
 {
-"Gb_type": "Tensor subclass overriden method call",
+"Gb_type": "Tensor subclass overridden method call",
 "Context": "{name}",
 "Explanation": "`torch.compile` currently can't trace this",
 "Hints": [
@@ -1601,9 +1601,9 @@
 ],
 "GB0164": [
 {
-"Gb_type": "Unsupported tensor subclass overriden attribute access",
+"Gb_type": "Unsupported tensor subclass overridden attribute access",
 "Context": "{name}",
-"Explanation": "`torch.compile` only support tracing certain types of overriden tensor subclass attributes",
+"Explanation": "`torch.compile` only support tracing certain types of overridden tensor subclass attributes",
 "Hints": [
 "Avoid accessing {name} of tensor subclass in torch.compile region",
 "Renaming attribute `{name}` of type {self.class_type}",
@@ -637,7 +637,7 @@ class PGOCacheArtifact(CacheArtifact):
 update the key to use the new MAST job's name and version.
 """
 if not original_key.startswith("mast:"):
-# if original_key is overriden, then dont change it
+# if original_key is overridden, then dont change it
 return original_key
 if (new_key := get_cache_key()) is not None:
 return new_key
@@ -1758,7 +1758,7 @@ class RandomVariable(VariableTracker):
 """random.Random()

 Implemented by wrapping a VariableTracker around a random.Random object.
-The supported methods for the random.Random object cannot be overriden.
+The supported methods for the random.Random object cannot be overridden.
 Assumes that random objects behave the same given a set seed or state.
 """
@@ -487,7 +487,7 @@ def _get_subclass_type_var(tx: "InstructionTranslator", var):
 return VariableTracker.build(tx, var.python_type(), source)


-def _is_attr_overidden(tx: "InstructionTranslator", var, name):
+def _is_attr_overridden(tx: "InstructionTranslator", var, name):
 import torch

 overridden = False
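The body of the renamed helper falls mostly outside this hunk. As a rough illustration only (a hypothetical sketch, not the Dynamo implementation), a check of this kind can be written by comparing what the subclass and base `torch.Tensor` resolve the attribute to:

    import torch

    def is_attr_overridden_sketch(subclass_type, name):
        # Hypothetical helper: treat the attribute as overridden when the
        # subclass resolves it to a different object than torch.Tensor does.
        return getattr(subclass_type, name, None) is not getattr(torch.Tensor, name, None)

    class MySub(torch.Tensor):
        def to(self, device):
            return self * len(device)

    print(is_attr_overridden_sketch(MySub, "to"))   # True
    print(is_attr_overridden_sketch(MySub, "add"))  # False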
@@ -640,11 +640,11 @@ class TensorWithTFOverrideVariable(TensorVariable):
 ],
 )

-# Handle non-overriden attributes inherited from `torch.Tensor`.
-attr_is_overriden = _is_attr_overidden(tx, self, name)
+# Handle non-overridden attributes inherited from `torch.Tensor`.
+attr_is_overridden = _is_attr_overridden(tx, self, name)
 if (
 hasattr(torch.Tensor, name)
-and not attr_is_overriden
+and not attr_is_overridden
 and not inspect.ismethoddescriptor(getattr(torch.Tensor, name))
 ):
 args, kwargs = [self], {}
@@ -694,11 +694,11 @@ class TensorWithTFOverrideVariable(TensorVariable):
 attr.__func__, self.class_type_var(tx), source=attr_source
 )

-elif attr_is_overriden:
+elif attr_is_overridden:
 unimplemented_v2(
-gb_type="Unsupported tensor subclass overriden attribute access",
+gb_type="Unsupported tensor subclass overridden attribute access",
 context=f"{name}",
-explanation="`torch.compile` only support tracing certain types of overriden tensor subclass attributes",
+explanation="`torch.compile` only support tracing certain types of overridden tensor subclass attributes",
 hints=[
 f"Avoid accessing {name} of tensor subclass in torch.compile region",
 f"Renaming attribute `{name}` of type {self.class_type}",
@@ -735,9 +735,9 @@ class TensorWithTFOverrideVariable(TensorVariable):
 if can_dispatch_torch_function(tx, tf_args, kwargs):
 import torch

-if _is_attr_overidden(tx, self, name):
+if _is_attr_overridden(tx, self, name):
 unimplemented_v2(
-gb_type="Tensor subclass overriden method call",
+gb_type="Tensor subclass overridden method call",
 context=f"{name}",
 explanation="`torch.compile` currently can't trace this",
 hints=[
@@ -258,7 +258,7 @@ class VmapInfo(NamedTuple):
 randomness: str


-def has_overriden_vmap_rule(autograd_function):
+def has_overridden_vmap_rule(autograd_function):
 return autograd_function.vmap is not torch.autograd.Function.vmap
@@ -286,14 +286,14 @@ def custom_function_call_vmap(interpreter, autograd_function, *operands, **kwarg
 )

 if autograd_function.generate_vmap_rule:
-if has_overriden_vmap_rule(autograd_function):
+if has_overridden_vmap_rule(autograd_function):
 # TODO: Update link to stable once that's out
 # https://github.com/pytorch/pytorch/issues/92029
 raise RuntimeError(
 f"You tried to vmap over {autograd_function.__name__}, but "
-f"it has both generate_vmap_rule=True and an overriden vmap "
+f"it has both generate_vmap_rule=True and an overridden vmap "
 f"staticmethod. Please set generate_vmap_rule=False or delete "
-f"the overriden vmap staticmethod to avoid ambiguity. "
+f"the overridden vmap staticmethod to avoid ambiguity. "
 f"For more details, please see "
 f"https://pytorch.org/docs/main/notes/extending.func.html"
 )
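A minimal sketch of the conflict this error message describes, assuming the current custom `autograd.Function` vmap support: a Function that both sets `generate_vmap_rule = True` and overrides the `vmap` staticmethod is ambiguous, so vmapping over it should raise the RuntimeError quoted above. The class below is a hypothetical repro, not code from the PR:

    import torch

    class AmbiguousFunc(torch.autograd.Function):
        generate_vmap_rule = True  # ask PyTorch to auto-generate the vmap rule

        @staticmethod
        def forward(x):
            return x.sin()

        @staticmethod
        def setup_context(ctx, inputs, output):
            pass

        @staticmethod
        def backward(ctx, grad):
            return grad  # never reached in this sketch

        @staticmethod
        def vmap(info, in_dims, x):  # ...while also overriding vmap by hand
            return x.sin(), 0

    # Expected to fail with "it has both generate_vmap_rule=True and an
    # overridden vmap staticmethod ..."
    torch.vmap(AmbiguousFunc.apply)(torch.randn(3))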
@@ -301,7 +301,7 @@ def custom_function_call_vmap(interpreter, autograd_function, *operands, **kwarg
 interpreter, autograd_function, *operands
 )

-if not has_overriden_vmap_rule(autograd_function):
+if not has_overridden_vmap_rule(autograd_function):
 # TODO: Update link to stable once that's out
 # https://github.com/pytorch/pytorch/issues/92029
 raise RuntimeError(
@@ -1227,7 +1227,7 @@ class TritonHOPifier:
 to the HOP (which can then be traced).

 Because Dynamo has its own calling conventions for e.g. invoking a user-defined function
-TritonHOPifier is an abstract class that can be overriden by its subclasses.
+TritonHOPifier is an abstract class that can be overridden by its subclasses.
 """

 def raise_unsupported(self, msg: str) -> Never:
@@ -1647,7 +1647,7 @@ class trace:
 compile_profile = False

 # Upload the .tar.gz file
-# Needs to be overriden based on specific environment needs
+# Needs to be overridden based on specific environment needs
 upload_tar: Optional[Callable[[str], None]] = None

 log_autotuning_results: bool = False
@@ -92,10 +92,10 @@ class PythonDispatcher:
 """

 def register(self, dispatchKeys):
-# Overriden is not supported and triggers a warning in C++ dispatcher.
+# Overridden is not supported and triggers a warning in C++ dispatcher.
 if len(set(dispatchKeys)) != len(dispatchKeys):
 raise RuntimeError(
-f"Overriden is not allowed but found duplicates in {dispatchKeys}."
+f"Overridden is not allowed but found duplicates in {dispatchKeys}."
 )
 # We currently forbid this in codegen instead of C++ dispatcher.
 if (
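The reworded message pairs with the `test_duplicate_registrations` hunk above; a minimal repro sketch (assuming the `torch._python_dispatcher` import path used by the test suite) is:

    from torch._python_dispatcher import PythonDispatcher

    dispatcher = PythonDispatcher()
    try:
        # Duplicate dispatch keys are rejected rather than silently overridden.
        dispatcher.register(["CPU", "CPU"])
    except RuntimeError as e:
        print(e)  # Overridden is not allowed but found duplicates in ['CPU', 'CPU'].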
@@ -385,7 +385,7 @@ class MetaTensorDescriber:
 is_leaf=is_leaf,
 requires_grad=t.requires_grad,
 # NB: ndim should be OK too but there is a disaster at
-# python test/dynamo/test_subclasses.py -k test_user_overidden_property_unsupported
+# python test/dynamo/test_subclasses.py -k test_user_overridden_property_unsupported
 # Actually, this means that we have a little bit of a problem
 # here, which is that there is some sensitivity to how exactly an
 # access is done if you have a __torch_function__ subclass. Maybe
@@ -32,7 +32,7 @@ class DataNormSparsifier(BaseDataSparsifier):
 zeros_per_block: Number of zeros in a sparse block
 Note::
 All arguments to the DataNormSparsifier constructor are "default"
-arguments and could be overriden by the configuration provided in the
+arguments and could be overridden by the configuration provided in the
 `add_data` step.
 """
@@ -52,7 +52,7 @@ class WeightNormSparsifier(BaseSparsifier):

 Note::
 All arguments to the WeightNormSparsifier constructor are "default"
-arguments and could be overriden by the configuration provided in the
+arguments and could be overridden by the configuration provided in the
 `prepare` step.
 """
@@ -1502,7 +1502,7 @@ def insert_observers_for_model(

 # first, populate the dtype map based only on qconfig and qhandler
 # this assumes:
-# graph inputs are fp32 by default, and int8 where overriden
+# graph inputs are fp32 by default, and int8 where overridden
 # other nodes output dtype is specified by the qconfig
 named_modules = dict(model.named_modules(remove_duplicate=False))
@@ -1471,7 +1471,7 @@ c10::intrusive_ptr<at::ivalue::Future> Engine::execute_with_graph_task(
 return graph_task->future_result_;
 }

-// note that when python is present, this base engine will be overriden
+// note that when python is present, this base engine will be overridden
 // with a PythonEngine. Because this typically happens before get_default_engine
 // is called, this base engine will never be created.
 Engine& Engine::get_base_engine() {
@@ -238,7 +238,7 @@ void registerPythonTensorClass(
 c10::Device dev(device);

 TORCH_CHECK(
-dev.type() == kXLA, "Only the python class for XLA can be overriden");
+dev.type() == kXLA, "Only the python class for XLA can be overridden");
 if (device_to_py_class_[static_cast<size_t>(dev.type())] != nullptr) {
 TORCH_WARN(
 "Overriding a previously registered python class for ", dev.str());
@@ -409,13 +409,13 @@ static bool THPVariable_tryResurrect(THPVariable* self) {

 static int THPFake_traverse(THPVariable* self, visitproc visit, void* arg) {
 TORCH_INTERNAL_ASSERT(
-false, "TensorBase tp_traverse function was not overriden properly");
+false, "TensorBase tp_traverse function was not overridden properly");
 return 0;
 }

 static int THPFake_clear(THPVariable* self) {
 TORCH_INTERNAL_ASSERT(
-false, "TensorBase tp_clear function was not overriden properly");
+false, "TensorBase tp_clear function was not overridden properly");
 return 0;
 }
@@ -2330,7 +2330,7 @@ int THPVariableMetaType_init(PyObject* cls, PyObject* args, PyObject* kwargs) {
 if (PyType_Type.tp_init(cls, args, kwargs) < 0) {
 return -1;
 }
-// It is important for all three of these to be overriden correctly for the
+// It is important for all three of these to be overridden correctly for the
 // resurrection checks to properly happen. In particular, an older version
 // was not overriding tp_clear here. This lead to the default subtype_clear
 // running on the Tensor object (as only TensorBase tp_clear was custom),
@@ -500,7 +500,7 @@ class SimpleElasticAgent(ElasticAgent):
 group_rank = rdzv_info.rank
 group_world_size = rdzv_info.world_size

-# master_addr/master_port could be explicitly overriden
+# master_addr/master_port could be explicitly overridden
 # TODO: BC - specific to static rdzv and can be simplifed further
 master_addr = spec.master_addr or rdzv_info.bootstrap_store_info.master_addr
 master_port = spec.master_port or rdzv_info.bootstrap_store_info.master_port
@@ -424,7 +424,7 @@ def get_testing_overrides() -> dict[Callable, Callable]:
 >>> inspect.signature(my_add)
 <Signature (input, other, out=None)>
 """
-# Every function in the PyTorchAPI that can be overriden needs an entry
+# Every function in the PyTorchAPI that can be overridden needs an entry
 # in this dict.
 #
 # Optimally we would use inspect to get the function signature and define
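The docstring excerpt above already shows the intended use; for completeness, a short sketch of the same call through the public `torch.overrides` module:

    import inspect
    import torch
    from torch.overrides import get_testing_overrides

    overrides = get_testing_overrides()
    my_add = overrides[torch.add]
    # The dummy override mirrors the real signature, as in the docstring example.
    print(inspect.signature(my_add))  # (input, other, out=None)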
@@ -1881,7 +1881,7 @@ def _get_overridable_functions() -> tuple[
 if ignore:
 continue

-# cannot be overriden by __torch_function__
+# cannot be overridden by __torch_function__
 if func in get_ignored_functions():
 msg = (
 "{}.{} is in the tuple returned by torch._overrides.get_ignored_functions "
@@ -90,7 +90,7 @@ class Pattern:
 def match(self, event: _ProfilerEvent):
 """
 Return True if the event matches the pattern.
-This method should be overriden in subclass.
+This method should be overridden in subclass.
 """
 raise NotImplementedError
@@ -1320,7 +1320,7 @@ def largeTensorTest(size, device=None, inductor=TEST_WITH_TORCHINDUCTOR):
 size may be a number of bytes, a string of the form "N GB", or a callable

 If the test is a device generic test, available memory on the primary device will be checked.
-It can also be overriden by the optional `device=` argument.
+It can also be overridden by the optional `device=` argument.
 In other tests, the `device=` argument needs to be specified.
 """
 if isinstance(size, str):
@@ -439,7 +439,7 @@ class ConfigModule(ModuleType):
 def _is_default(self, name: str) -> bool:
 """
 Returns true if the config is at its default value.
-configs overriden by the env are not considered default.
+configs overridden by the env are not considered default.
 """
 config_val = self._config[name]
 # The config is not overridden by the user, and the env_value_default
@@ -33,7 +33,7 @@ class OrderedSet(MutableSet[T], Reversible[T]):
 return s

 #
-# Required overriden abstract methods
+# Required overridden abstract methods
 #
 def __contains__(self, elem: object) -> bool:
 return elem in self._dict
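For readers unfamiliar with the `MutableSet` ABC this class builds on, the abstract methods a subclass is required to override are `__contains__`, `__iter__`, `__len__`, `add`, and `discard`. A dict-backed sketch (illustrative only, not PyTorch's `OrderedSet`):

    from collections.abc import MutableSet

    class MiniOrderedSet(MutableSet):
        def __init__(self, iterable=()):
            self._dict = dict.fromkeys(iterable)  # dict preserves insertion order

        # Required overridden abstract methods
        def __contains__(self, elem):
            return elem in self._dict

        def __iter__(self):
            return iter(self._dict)

        def __len__(self):
            return len(self._dict)

        def add(self, elem):
            self._dict[elem] = None

        def discard(self, elem):
            self._dict.pop(elem, None)

    s = MiniOrderedSet([3, 1, 2])
    s.add(1)        # already present; order unchanged
    print(list(s))  # [3, 1, 2]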
@@ -94,7 +94,7 @@ class AHTrainDecisionTree(AHTrain):

 def get_grid_search_values(self):
 """
-Standard values for grid search. Can be overriden.
+Standard values for grid search. Can be overridden.
 """
 return {
 "max_depth": [5, 6, 7],