diff --git a/torch/_inductor/codegen/wrapper.py b/torch/_inductor/codegen/wrapper.py
index 5fedc0cb99ca..d76f943fde25 100644
--- a/torch/_inductor/codegen/wrapper.py
+++ b/torch/_inductor/codegen/wrapper.py
@@ -1078,7 +1078,7 @@ class PythonWrapperCodegen(CodeGen):
             arg.get_dtype() if isinstance(arg, IRNode) else type(arg)
             for arg in raw_args
         ]
-        # Because generate_kernel_call can be overriden by a subclass, explictly call
+        # Because generate_kernel_call can be overridden by a subclass, explicitly call
         # PythonWrapperCodegen.generate_kernel_call here
         PythonWrapperCodegen.generate_kernel_call(
             self,
@@ -2376,7 +2376,7 @@ class PythonWrapperCodegen(CodeGen):
 
     def codegen_subgraph_prefix(self, subgraph, outer_inputs, outer_outputs):
         # All inputs of hops must be explicitly passed in.
-        # Free tensors and basic symbols should have been explictily lifted as inputs in dynamo.
+        # Free tensors and basic symbols should have been explicitly lifted as inputs in dynamo.
         assert len(outer_inputs) == len(
             subgraph.graph.graph_input_names
         ), f"graph_input_names:{subgraph.graph.graph_input_names}, outer_inputs: {outer_inputs}"
diff --git a/torch/csrc/jit/python/pybind_utils.h b/torch/csrc/jit/python/pybind_utils.h
index 4c108904cc58..273c6acd49ce 100644
--- a/torch/csrc/jit/python/pybind_utils.h
+++ b/torch/csrc/jit/python/pybind_utils.h
@@ -691,7 +691,7 @@ inline IValue toTypeInferredIValue(py::handle input) {
     if (auto mod = as_module(object)) {
       // if obj is already a ScriptModule, just return its ivalue
       auto ptr = mod.value()._ivalue();
-      // explict copy semantics for strong ownership of the resource.
+      // explicit copy semantics for strong ownership of the resource.
       return c10::intrusive_ptr<c10::ivalue::Object>::reclaim_copy(
           ptr.release());
     }
diff --git a/torch/distributed/distributed_c10d.py b/torch/distributed/distributed_c10d.py
index 1b0442c15423..ff89dd79f2d0 100644
--- a/torch/distributed/distributed_c10d.py
+++ b/torch/distributed/distributed_c10d.py
@@ -1811,7 +1811,7 @@ def _shutdown_backend(pg):
     except RuntimeError:
         pass
     if is_nccl_available() and isinstance(backend, ProcessGroupNCCL):
-        # explictly call shutdown to ensure that NCCL resources are released
+        # explicitly call shutdown to ensure that NCCL resources are released
         backend._shutdown()
 
 
diff --git a/torch/distributed/pipelining/_backward.py b/torch/distributed/pipelining/_backward.py
index e2eebf49ad77..a31ee53206ab 100644
--- a/torch/distributed/pipelining/_backward.py
+++ b/torch/distributed/pipelining/_backward.py
@@ -344,7 +344,7 @@ def stage_backward(
         # 2. extract_tensors_with_grads referred to both stage_output_tensors, output_grad_tensors,
         # and to itself (extract_tensors_with_grads) since it makes a recursive call
         # 3. stage_output_tensors was kept alive by the above refcycle, and it holds activation tensors, which is bad
-        # fix -> explictly pass in the ref to the fn, so there is no gc cycle anymore
+        # fix -> explicitly pass in the ref to the fn, so there is no gc cycle anymore
         extract_tensors_with_grads(
             stage_output, output_grads, extract_tensors_with_grads
         )
diff --git a/torchgen/gen_aoti_c_shim.py b/torchgen/gen_aoti_c_shim.py
index 9846de77b430..da4abab91c83 100644
--- a/torchgen/gen_aoti_c_shim.py
+++ b/torchgen/gen_aoti_c_shim.py
@@ -142,7 +142,7 @@ def convert_arg_type_and_name(  # type: ignore[return]
             new_callsite_exprs,
         )
     elif isinstance(typ, ListType):
-        # Need to explictly pass the list as pointer + length
+        # Need to explicitly pass the list as pointer + length
         c_types, names, aten_types, _ = convert_arg_type_and_name(typ.elem, name)
         assert len(c_types) == 1, "ListType with unsupported element type " + repr(typ)
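For context on the first `wrapper.py` hunk: the comment describes calling the base-class implementation through the class object rather than through `self`, so that a subclass override of `generate_kernel_call` is bypassed. A minimal sketch of that Python pattern, with made-up class names standing in for `PythonWrapperCodegen` and its subclasses:

```python
class Codegen:
    def generate_kernel_call(self) -> str:
        return "base codegen"


class CustomCodegen(Codegen):
    def generate_kernel_call(self) -> str:
        return "subclass codegen"


obj = CustomCodegen()

# Normal attribute lookup dispatches dynamically to the override:
assert obj.generate_kernel_call() == "subclass codegen"

# Calling through the class pins the base implementation, which is
# what the comment in wrapper.py is pointing out:
assert Codegen.generate_kernel_call(obj) == "base codegen"
```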
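The `_backward.py` hunk's comment documents a garbage-collection fix worth spelling out: a recursive closure that names itself creates a reference cycle (function object → closure cell → function object), which keeps everything the closure captures alive until Python's cyclic collector runs. Passing the function to itself as an explicit argument breaks the cycle, so plain reference counting frees the captured tensors promptly. A minimal sketch of the before/after, using illustrative names (`collect_leaves_*`, `visit`) rather than PyTorch's actual helpers:

```python
def collect_leaves_cyclic(tree):
    leaves = []

    def visit(node):
        # `visit` names itself, so its closure cell points back at the
        # function object: a reference cycle that also pins `leaves`.
        if isinstance(node, (list, tuple)):
            for child in node:
                visit(child)
        else:
            leaves.append(node)

    visit(tree)
    # Even after callers drop the result, the cycle keeps this list
    # alive until gc.collect() eventually runs.
    return leaves


def collect_leaves_acyclic(tree):
    leaves = []

    def visit(node, visit_fn):
        # Recursion goes through the explicit `visit_fn` argument, so the
        # closure never refers to itself and no gc cycle is formed.
        if isinstance(node, (list, tuple)):
            for child in node:
                visit_fn(child, visit_fn)
        else:
            leaves.append(node)

    visit(tree, visit)
    return leaves  # reclaimed promptly by reference counting
```

This mirrors the fix the diff comments on: `extract_tensors_with_grads` receives a reference to itself as an argument instead of closing over its own name, so `stage_output_tensors` no longer lingers in a cycle holding activation tensors.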