From a69785b3eca362097a6d84eb887fa6c896171fb0 Mon Sep 17 00:00:00 2001 From: Xuehai Pan Date: Wed, 18 Jun 2025 00:11:00 +0800 Subject: [PATCH] [BE] fix typos in tools/ (#156082) Pull Request resolved: https://github.com/pytorch/pytorch/pull/156082 Approved by: https://github.com/soulitzer ghstack dependencies: #156079 --- .lintrunner.toml | 1 - tools/autograd/derivatives.yaml | 4 ++-- tools/autograd/gen_inplace_or_view_type.py | 2 +- tools/autograd/gen_python_functions.py | 4 ++-- tools/autograd/gen_trace_type.py | 2 +- tools/autograd/load_derivatives.py | 2 +- tools/bazel_tools/shellwrap.sh | 2 +- tools/build_defs/buck_helpers.bzl | 2 +- tools/build_defs/type_defs.bzl | 6 +++--- tools/build_with_debinfo.py | 4 ++-- tools/code_analyzer/gen_operators_yaml.py | 4 ++-- tools/code_coverage/package/tool/print_report.py | 4 ++-- tools/config/defs.bzl | 2 +- tools/dynamo/graph_break_registry.json | 6 +++--- tools/flight_recorder/components/builder.py | 2 +- tools/flight_recorder/components/loader.py | 2 +- tools/flight_recorder/components/utils.py | 2 +- tools/flight_recorder/fr_trace.py | 2 +- tools/linter/adapters/_linter.py | 2 +- tools/linter/dictionary.txt | 1 + tools/lldb/deploy_debugger.py | 2 +- tools/nightly.py | 4 ++-- tools/nvcc_fix_deps.py | 2 +- tools/packaging/build_wheel.py | 4 ++-- tools/stats/check_disabled_tests.py | 2 +- tools/stats/monitor.py | 2 +- tools/test/test_docstring_linter.py | 2 +- tools/testing/target_determination/heuristics/__init__.py | 2 +- tools/testing/test_run.py | 2 +- 29 files changed, 39 insertions(+), 39 deletions(-) diff --git a/.lintrunner.toml b/.lintrunner.toml index a96fdff0a183..6712709230f5 100644 --- a/.lintrunner.toml +++ b/.lintrunner.toml @@ -1164,7 +1164,6 @@ exclude_patterns = [ 'functorch/**', 'scripts/**', 'test/**', - 'tools/**', 'torch/**', 'torchgen/**', ] diff --git a/tools/autograd/derivatives.yaml b/tools/autograd/derivatives.yaml index fe4dd72b247d..e2419aab268b 100644 --- a/tools/autograd/derivatives.yaml 
+++ b/tools/autograd/derivatives.yaml @@ -113,7 +113,7 @@ # - `wrap_opt_if`, is a 2-argument function that accepts a tensor # variable and a boolean condition that dictates whether to save that # variable in a graph. The result of this function is `std::optional`, -# and it is `::std::nullopt` when the condition evalutes to `false`, +# and it is `::std::nullopt` when the condition evaluates to `false`, # otherwise it is the variable wrapped in `std::optional`. # For example, wrap_opt_if(var_0, grad_input_mask[1] || grad_input_mask[2]) # would mean that `var_0` is saved as long as the second (grad_input_mask[1]) @@ -200,7 +200,7 @@ # Undefined Tensors are created with the default constructor `at::Tensor()`. # It is an efficient way to represent a Tensor filled with zeros because # the Tensor holds no sizing information and no Storage data is allocated. -# But consequentially, Tensor operations cannot be performed on them. +# But consequently, Tensor operations cannot be performed on them. # Therefore, your backward function should treat an undefined output grad as # a zero, and it needs to be a special case. # diff --git a/tools/autograd/gen_inplace_or_view_type.py b/tools/autograd/gen_inplace_or_view_type.py index 0fd882d00cf1..684290da0a72 100644 --- a/tools/autograd/gen_inplace_or_view_type.py +++ b/tools/autograd/gen_inplace_or_view_type.py @@ -2,7 +2,7 @@ # # NOTE: If any changes are being made to the ADInplaceOrView codegen please also check # if updates are needed in torch/csrc/autograd/autograd_not_implemented_fallback.cpp -# The fallback is expected to mimick this codegen, so we should keep the two in sync. +# The fallback is expected to mimic this codegen, so we should keep the two in sync. 
from __future__ import annotations diff --git a/tools/autograd/gen_python_functions.py b/tools/autograd/gen_python_functions.py index f1e0140a4155..995243a9e6b4 100644 --- a/tools/autograd/gen_python_functions.py +++ b/tools/autograd/gen_python_functions.py @@ -148,7 +148,7 @@ _SKIP_PYTHON_BINDINGS = [ "mH", # these need to be an attributes in Python, not functions "nonzero(_(out|numpy))?", "set_data", - ".*_overrideable", # overrideable functions for backend extension + ".*_overrideable", # overridable functions for backend extension "data", "is_leaf", "output_nr", @@ -617,7 +617,7 @@ def load_deprecated_signatures( schema_args_by_name = {a.name: a for a in schema.arguments.flat_all} for name in call_args: assert name in schema_args_by_name or name in known_constants, ( - f"deprecation definiton: Unrecognized value {name}" + f"deprecation definition: Unrecognized value {name}" ) # Map deprecated signature arguments to their aten signature and test diff --git a/tools/autograd/gen_trace_type.py b/tools/autograd/gen_trace_type.py index 67f71d2df503..21069b4671e2 100644 --- a/tools/autograd/gen_trace_type.py +++ b/tools/autograd/gen_trace_type.py @@ -17,7 +17,7 @@ if TYPE_CHECKING: # Note [Manual Backend kernels] # For these ops, we want to manually register to dispatch key Backend and -# skip codegen-ed registeration to all keys before Backend. +# skip codegen-ed registration to all keys before Backend. 
# For codegen this means: # - op set below must match ops with manual_kernel_registration=True in native_functions.yaml # where we skip codegen backend kernels diff --git a/tools/autograd/load_derivatives.py b/tools/autograd/load_derivatives.py index 6df4d389fa55..f61226f25fb9 100644 --- a/tools/autograd/load_derivatives.py +++ b/tools/autograd/load_derivatives.py @@ -336,7 +336,7 @@ def postprocess_forward_derivatives( # This transformation is based on the observation that for element-wise functions, the Jacobian # matrix is diagonal and thus doing J * v is the same as (v^T J)^T (in practice, we ignore the transpositions) # For the complex case, we use hermitian transpose and get (v.conj() J).conj() - # So here we are going to re-use the backward formula and replace two things: + # So here we are going to reuse the backward formula and replace two things: # 1) all occurrences of "grad" with "foo_t.conj()", where foo is the name of the unique differentiable input. # 2) all usage of an original input "foo" with its primal value "foo_p". 
# 3) conjugate the final result diff --git a/tools/bazel_tools/shellwrap.sh b/tools/bazel_tools/shellwrap.sh index 1ebab29a6a73..712788ae09e0 100755 --- a/tools/bazel_tools/shellwrap.sh +++ b/tools/bazel_tools/shellwrap.sh @@ -54,5 +54,5 @@ echo "Entering interactive shell at the execution root:" # quote escape all the arguments to use as a single input string cmd="'$shell' --noprofile --rcfile '$rcfile'" -# run the command in a script psuedo terminal and dump to null +# run the command in a script pseudo terminal and dump to null /usr/bin/script -c "$cmd" -q /dev/null diff --git a/tools/build_defs/buck_helpers.bzl b/tools/build_defs/buck_helpers.bzl index 2353fae91101..aced2308ba24 100644 --- a/tools/build_defs/buck_helpers.bzl +++ b/tools/build_defs/buck_helpers.bzl @@ -24,7 +24,7 @@ ONLY_AVAILABLE_IN_BUCK2 = [ def filter_attributes(kwgs): keys = list(kwgs.keys()) - # drop unncessary attributes + # drop unnecessary attributes for key in keys: if key in IGNORED_ATTRIBUTES or key in ONLY_AVAILABLE_IN_BUCK2: kwgs.pop(key) diff --git a/tools/build_defs/type_defs.bzl b/tools/build_defs/type_defs.bzl index 7a905e7d6cc0..6dc5ffe42d17 100644 --- a/tools/build_defs/type_defs.bzl +++ b/tools/build_defs/type_defs.bzl @@ -83,7 +83,7 @@ def is_bool(arg): """Checks if provided instance is a boolean value. Args: - arg: An instance ot check. type: Any + arg: An instance to check. type: Any Returns: True for boolean values, False otherwise. rtype: bool @@ -96,7 +96,7 @@ def is_number(arg): """Checks if provided instance is a number value. Args: - arg: An instance ot check. type: Any + arg: An instance to check. type: Any Returns: True for number values, False otherwise. rtype: bool @@ -109,7 +109,7 @@ def is_struct(arg): """Checks if provided instance is a struct value. Args: - arg: An instance ot check. type: Any + arg: An instance to check. type: Any Returns: True for struct values, False otherwise. 
rtype: bool diff --git a/tools/build_with_debinfo.py b/tools/build_with_debinfo.py index 73c9dba0090b..8742966aabe8 100755 --- a/tools/build_with_debinfo.py +++ b/tools/build_with_debinfo.py @@ -38,7 +38,7 @@ def get_lib_extension() -> str: return "so" if sys.platform == "darwin": return "dylib" - raise RuntimeError(f"Usupported platform {sys.platform}") + raise RuntimeError(f"Unsupported platform {sys.platform}") def create_symlinks() -> None: @@ -78,7 +78,7 @@ def create_build_plan() -> list[tuple[str, str]]: if line.startswith(": &&") and line.endswith("&& :"): line = line[4:-4] line = line.replace("-O2", "-g").replace("-O3", "-g") - # Build Metal shaders with debug infomation + # Build Metal shaders with debug information if "xcrun metal " in line and "-frecord-sources" not in line: line += " -frecord-sources -gline-tables-only" try: diff --git a/tools/code_analyzer/gen_operators_yaml.py b/tools/code_analyzer/gen_operators_yaml.py index ede651679847..a5a80c1c66f5 100644 --- a/tools/code_analyzer/gen_operators_yaml.py +++ b/tools/code_analyzer/gen_operators_yaml.py @@ -68,13 +68,13 @@ from torchgen.selective_build.selector import merge_kernel_metadata # used by training, and not just the root operators. All Training ops are # also considered for inference, so these are merged into inference ops. # -# 3. Operator Depencency Graph (--dep-graph-yaml-path): A path to the +# 3. Operator Dependency Graph (--dep-graph-yaml-path): A path to the # operator dependency graph used to determine which operators depend on # which other operators for correct functioning. This is used for # generating the transitive closure of all the operators used by the # model based on the root operators when static selective build is used. # For tracing based selective build, we don't need to perform this -# transitive cloure. +# transitive closure. # # 4. Model Metadata (--model-name, --model-versions, --model-assets, # --model-backends): Self-descriptive. 
These are used to tell this diff --git a/tools/code_coverage/package/tool/print_report.py b/tools/code_coverage/package/tool/print_report.py index 26c20aca231a..ea099751d740 100644 --- a/tools/code_coverage/package/tool/print_report.py +++ b/tools/code_coverage/package/tool/print_report.py @@ -133,7 +133,7 @@ def print_file_oriented_report( coverage_percentage = print_file_summary( covered_summary, total_summary, summary_file ) - # print test condition (interested folder / tests that are successsful or failed) + # print test condition (interested folder / tests that are successful or failed) print_test_condition( tests, tests_type, @@ -204,7 +204,7 @@ def html_oriented_report() -> None: # use lcov to generate the coverage report build_folder = os.path.join(get_pytorch_folder(), "build") coverage_info_file = os.path.join(SUMMARY_FOLDER_DIR, "coverage.info") - # generage coverage report -- coverage.info in build folder + # generate coverage report -- coverage.info in build folder subprocess.check_call( [ "lcov", diff --git a/tools/config/defs.bzl b/tools/config/defs.bzl index 6ddd0e991561..f8a1e9dc16f2 100644 --- a/tools/config/defs.bzl +++ b/tools/config/defs.bzl @@ -27,7 +27,7 @@ def if_rocm(if_true, if_false = []): def if_sycl(if_true, if_false = []): """Helper for selecting based on the whether SYCL/ComputeCPP is configured.""" - # NOTE: Tensorflow expects some stange behavior (see their if_sycl) if we + # NOTE: Tensorflow expects some strange behavior (see their if_sycl) if we # actually plan on supporting this at some point. 
return select({ "//conditions:default": if_false, diff --git a/tools/dynamo/graph_break_registry.json b/tools/dynamo/graph_break_registry.json index ac0dabe7a556..6a773c8fd168 100644 --- a/tools/dynamo/graph_break_registry.json +++ b/tools/dynamo/graph_break_registry.json @@ -270,7 +270,7 @@ { "Gb_type": "Caught non-Exception value", "Context": "str(exc_instance)", - "Explanation": "Except expects to recieve an object of Exception type but received {exc_instance}.", + "Explanation": "Except expects to receive an object of Exception type but received {exc_instance}.", "Hints": [ "Dynamo has detected that tracing the code will result in an error when running in eager. Please double check that your code doesn't contain a similar error when actually running eager/uncompiled." ] @@ -330,7 +330,7 @@ "Hints": [ "Use `torch._assert()` to raise a hard AssertionError when the check fails. ", "This error will propagate back the user code ", - "that called the compiled function (i.e. Dynamo wil not trace any exception handling).", + "that called the compiled function (i.e. Dynamo will not trace any exception handling).", "Remove the assert statement.", "Move the assert statement outside of any context managers in order to graph break with ", "partial graph compilation (if fullgraph=False).", @@ -2050,7 +2050,7 @@ { "Gb_type": "torch.distributed package is not available!", "Context": "", - "Explanation": "The PyTorch package doesn't include torch.distributed when builing from source.", + "Explanation": "The PyTorch package doesn't include torch.distributed when building from source.", "Hints": [ "Set USE_DISTRIBUTED=1 to enable it when building PyTorch from source." 
] diff --git a/tools/flight_recorder/components/builder.py b/tools/flight_recorder/components/builder.py index 9ff3f3c68d45..2a9cee36f7bc 100644 --- a/tools/flight_recorder/components/builder.py +++ b/tools/flight_recorder/components/builder.py @@ -300,7 +300,7 @@ def build_collectives( for _ in range(1, num_coalesced_entries): all_entries[i].pop(k) else: - # Iterate through all the ranks and check if there is a mis-match for the current entry. + # Iterate through all the ranks and check if there is a mismatch for the current entry. check_current_entry_match( all_entries, _pg_guids, diff --git a/tools/flight_recorder/components/loader.py b/tools/flight_recorder/components/loader.py index d836779b585f..dd2eb109aa56 100644 --- a/tools/flight_recorder/components/loader.py +++ b/tools/flight_recorder/components/loader.py @@ -46,7 +46,7 @@ exp = re.compile(r"([\w\-\_]*?)(\d+)$") def _determine_prefix(files: list[str]) -> str: """If the user doesn't specify a prefix, but does pass a dir full of similarly-prefixed files, we should be able to - infer the common prefix most of the time. But if we can't confidently infer, just fall back to requring the user + infer the common prefix most of the time. But if we can't confidently infer, just fall back to requiring the user to specify it """ possible_prefixes: defaultdict[str, set[int]] = defaultdict(set) diff --git a/tools/flight_recorder/components/utils.py b/tools/flight_recorder/components/utils.py index 5a5063a15978..944115a25eb2 100644 --- a/tools/flight_recorder/components/utils.py +++ b/tools/flight_recorder/components/utils.py @@ -312,7 +312,7 @@ def match_coalesced_groups_with_non_p2p( {first_rank}, ) - # Iterate through all the ranks and check if there is a mis-match for the current entry. + # Iterate through all the ranks and check if there is a mismatch for the current entry. 
check_current_entry_match( all_coalesced_entries, _pg_guids, diff --git a/tools/flight_recorder/fr_trace.py b/tools/flight_recorder/fr_trace.py index aebd914eb467..1d8abcefabfa 100644 --- a/tools/flight_recorder/fr_trace.py +++ b/tools/flight_recorder/fr_trace.py @@ -14,7 +14,7 @@ Not Yet Implemented - TODO- tracebacks aren't implemented Known Issues -- Flight Recorder buffer sequence_id information is not sufficient to match collectives and coalseced collectives +- Flight Recorder buffer sequence_id information is not sufficient to match collectives and coalesced collectives unless we have the trace data from the beginning of the program. To enable confident analysis of trace buffers that do not start from zero (and to simplify the script's matching logic) we need to add more information to the recorder. - Currently, the script omits checking the 'status' of collectives. We can look for the first 'non completed' diff --git a/tools/linter/adapters/_linter.py b/tools/linter/adapters/_linter.py index 0b767d5fb817..6f3048d9b777 100644 --- a/tools/linter/adapters/_linter.py +++ b/tools/linter/adapters/_linter.py @@ -413,7 +413,7 @@ class FileLinter(Generic[PythonFileT], ABC): return not results or self.args.fix and all(r.is_edit for r in results) def _error(self, pf: PythonFileT, result: LintResult) -> None: - """Called on files that are unparseable""" + """Called on files that are unparsable""" def _replace(self, pf: PythonFileT) -> tuple[str, list[LintResult]]: # Because of recursive replacements, we need to repeat replacing and reparsing diff --git a/tools/linter/dictionary.txt b/tools/linter/dictionary.txt index ddcab8116cc8..93be13fa6510 100644 --- a/tools/linter/dictionary.txt +++ b/tools/linter/dictionary.txt @@ -1,5 +1,6 @@ coo fro +froms hsa nd optins diff --git a/tools/lldb/deploy_debugger.py b/tools/lldb/deploy_debugger.py index 135a6167e3a4..7a28c72a6caf 100644 --- a/tools/lldb/deploy_debugger.py +++ b/tools/lldb/deploy_debugger.py @@ -25,7 +25,7 @@ from 
pathlib import Path stem = Path(name).stem with NamedTemporaryFile(prefix=stem, suffix='.so', delete=False) as tf: tf.write(r) - print("torch_deploy registering debug inforation for ", tf.name) + print("torch_deploy registering debug information for ", tf.name) cmd1 = f"target modules add {tf.name}" # print(cmd1) lldb.debugger.HandleCommand(cmd1) diff --git a/tools/nightly.py b/tools/nightly.py index 5f129fbc2ad9..829935b0cd07 100755 --- a/tools/nightly.py +++ b/tools/nightly.py @@ -14,7 +14,7 @@ Or if you would like to check out the nightly commit in detached HEAD mode:: $ ./tools/nightly.py checkout $ source venv/bin/activate # or `& .\venv\Scripts\Activate.ps1` on Windows -Or if you would like to re-use an existing virtual environment, you can pass in +Or if you would like to reuse an existing virtual environment, you can pass in the prefix argument (--prefix):: $ ./tools/nightly.py checkout -b my-nightly-branch -p my-env @@ -686,7 +686,7 @@ def _nightly_version(site_dir: Path) -> str: @timed("Checking out nightly PyTorch") def checkout_nightly_version(branch: str | None, site_dir: Path) -> None: - """Get's the nightly version and then checks it out.""" + """Gets the nightly version and then checks it out.""" nightly_version = _nightly_version(site_dir) if branch is None: # Detached mode - explicitly use --detach flag diff --git a/tools/nvcc_fix_deps.py b/tools/nvcc_fix_deps.py index 0c0c9db66693..a4a3b536eeae 100644 --- a/tools/nvcc_fix_deps.py +++ b/tools/nvcc_fix_deps.py @@ -1,4 +1,4 @@ -"""Tool to fix the nvcc's dependecy file output +"""Tool to fix the nvcc's dependency file output Usage: python nvcc_fix_deps.py nvcc [nvcc args]... 
diff --git a/tools/packaging/build_wheel.py b/tools/packaging/build_wheel.py index 96e4978c7fcd..16e9a87bd963 100644 --- a/tools/packaging/build_wheel.py +++ b/tools/packaging/build_wheel.py @@ -62,7 +62,7 @@ def venv(interpreter: str) -> Iterator[str]: class Builder: - # The python interpeter that we should be using + # The python interpreter that we should be using interpreter: str def __init__(self, interpreter: str) -> None: @@ -124,7 +124,7 @@ def main() -> None: with venv(interpreter) as venv_interpreter: builder = Builder(venv_interpreter) # clean actually requires setuptools so we need to ensure we - # install requriements before + # install requirements before builder.install_requirements() builder.clean() diff --git a/tools/stats/check_disabled_tests.py b/tools/stats/check_disabled_tests.py index 5505dc265929..f1f8e2f99ee8 100644 --- a/tools/stats/check_disabled_tests.py +++ b/tools/stats/check_disabled_tests.py @@ -173,7 +173,7 @@ def save_results( all_tests: dict[str, dict[str, int]], ) -> None: """ - Save the result to S3, which then gets put into the HUD backened database + Save the result to S3, which then gets put into the HUD backend database """ should_be_enabled_tests = { name: stats diff --git a/tools/stats/monitor.py b/tools/stats/monitor.py index a79f50bc141e..a5affc2510b7 100644 --- a/tools/stats/monitor.py +++ b/tools/stats/monitor.py @@ -1,7 +1,7 @@ #!/usr/bin/env python3 """ A Python script that logging the system-level utilization usage in json format. -Data collected: CPU, memory, GPU memeory utilzation, and GPU utilization if available. +Data collected: CPU, memory, GPU memory utilization, and GPU utilization if available. 
Usage: - To run the script with default data collect time setting, use the following command: diff --git a/tools/test/test_docstring_linter.py b/tools/test/test_docstring_linter.py index 2573058c36f0..5edf593e9fdb 100644 --- a/tools/test/test_docstring_linter.py +++ b/tools/test/test_docstring_linter.py @@ -54,7 +54,7 @@ class TestDocstringLinter(LinterTestCase): grandfather_file = f"{td}/grandfather.json" grandfather = f"--grandfather={grandfather_file}" - # Find some faiures + # Find some failures run("before.txt", grandfather) # Rewrite grandfather file diff --git a/tools/testing/target_determination/heuristics/__init__.py b/tools/testing/target_determination/heuristics/__init__.py index 1bd5940abbb1..388b72425457 100644 --- a/tools/testing/target_determination/heuristics/__init__.py +++ b/tools/testing/target_determination/heuristics/__init__.py @@ -33,7 +33,7 @@ if TYPE_CHECKING: # All currently running heuristics. -# To add a heurstic in trial mode, specify the keywork argument `trial_mode=True`. +# To add a heuristic in trial mode, specify the keyword argument `trial_mode=True`. HEURISTICS: list[HeuristicInterface] = [ PreviouslyFailedInPR(), EditedByPR(), diff --git a/tools/testing/test_run.py b/tools/testing/test_run.py index 81bdfc4d7088..aa4efa6d890c 100644 --- a/tools/testing/test_run.py +++ b/tools/testing/test_run.py @@ -285,7 +285,7 @@ class ShardedTest: if not isinstance(other, ShardedTest): raise NotImplementedError - # This is how the list was implicity sorted when it was a NamedTuple + # This is how the list was implicitly sorted when it was a NamedTuple if self.name != other.name: return self.name < other.name if self.shard != other.shard: