mirror of https://github.com/pytorch/pytorch.git
synced 2025-10-20 12:54:11 +08:00

commit a69785b3ec (parent ccea6ddac3)
committed by PyTorch MergeBot

[BE] fix typos in tools/ (#156082)

Pull Request resolved: https://github.com/pytorch/pytorch/pull/156082
Approved by: https://github.com/soulitzer
ghstack dependencies: #156079
@@ -1164,7 +1164,6 @@ exclude_patterns = [
     'functorch/**',
     'scripts/**',
     'test/**',
-    'tools/**',
     'torch/**',
     'torchgen/**',
 ]
@@ -113,7 +113,7 @@
 # - `wrap_opt_if`, is a 2-argument function that accepts a tensor
 # variable and a boolean condition that dictates whether to save that
 # variable in a graph. The result of this function is `std::optional<Tensor>`,
-# and it is `::std::nullopt` when the condition evalutes to `false`,
+# and it is `::std::nullopt` when the condition evaluates to `false`,
 # otherwise it is the variable wrapped in `std::optional<Tensor>`.
 # For example, wrap_opt_if(var_0, grad_input_mask[1] || grad_input_mask[2])
 # would mean that `var_0` is saved as long as the second (grad_input_mask[1])
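The helper itself lives in the C++ autograd codegen, but its save-if semantics are easy to model. A minimal Python sketch of the behavior described above (illustrative only; the real helper returns `std::optional<Tensor>`, not None):

    def wrap_opt_if(var, cond):
        # The variable is saved only when the condition holds; otherwise the
        # "optional" is empty (None here, `::std::nullopt` in the C++ codegen).
        return var if cond else None

    # var_0 is saved as long as the second or third gradient is needed.
    grad_input_mask = [True, False, True]
    saved = wrap_opt_if("var_0", grad_input_mask[1] or grad_input_mask[2])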
@@ -200,7 +200,7 @@
 # Undefined Tensors are created with the default constructor `at::Tensor()`.
 # It is an efficient way to represent a Tensor filled with zeros because
 # the Tensor holds no sizing information and no Storage data is allocated.
-# But consequentially, Tensor operations cannot be performed on them.
+# But consequently, Tensor operations cannot be performed on them.
 # Therefore, your backward function should treat an undefined output grad as
 # a zero, and it needs to be a special case.
 #
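On the Python side of autograd, an undefined output grad surfaces as None once grad materialization is turned off. A hedged sketch of the "treat undefined as zero" special case the note asks for (the operator and its formulas here are made up for illustration):

    import torch

    class TwoOutputs(torch.autograd.Function):
        @staticmethod
        def forward(ctx, x):
            ctx.save_for_backward(x)
            ctx.set_materialize_grads(False)  # let undefined grads arrive as None
            return x * 2, x * 3

        @staticmethod
        def backward(ctx, g1, g2):
            (x,) = ctx.saved_tensors
            # Special-case undefined output grads by treating them as zeros.
            if g1 is None:
                g1 = torch.zeros_like(x)
            if g2 is None:
                g2 = torch.zeros_like(x)
            return g1 * 2 + g2 * 3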
@@ -2,7 +2,7 @@
 #
 # NOTE: If any changes are being made to the ADInplaceOrView codegen please also check
 # if updates are needed in torch/csrc/autograd/autograd_not_implemented_fallback.cpp
-# The fallback is expected to mimick this codegen, so we should keep the two in sync.
+# The fallback is expected to mimic this codegen, so we should keep the two in sync.
 
 from __future__ import annotations
 
@@ -148,7 +148,7 @@ _SKIP_PYTHON_BINDINGS = [
     "mH", # these need to be an attributes in Python, not functions
     "nonzero(_(out|numpy))?",
     "set_data",
-    ".*_overrideable", # overrideable functions for backend extension
+    ".*_overrideable", # overridable functions for backend extension
     "data",
     "is_leaf",
     "output_nr",
@@ -617,7 +617,7 @@ def load_deprecated_signatures(
     schema_args_by_name = {a.name: a for a in schema.arguments.flat_all}
     for name in call_args:
         assert name in schema_args_by_name or name in known_constants, (
-            f"deprecation definiton: Unrecognized value {name}"
+            f"deprecation definition: Unrecognized value {name}"
         )
 
     # Map deprecated signature arguments to their aten signature and test
@@ -17,7 +17,7 @@ if TYPE_CHECKING:
 
 # Note [Manual Backend kernels]
 # For these ops, we want to manually register to dispatch key Backend and
-# skip codegen-ed registeration to all keys before Backend.
+# skip codegen-ed registration to all keys before Backend.
 # For codegen this means:
 # - op set below must match ops with manual_kernel_registration=True in native_functions.yaml
 # where we skip codegen backend kernels
@@ -336,7 +336,7 @@ def postprocess_forward_derivatives(
     # This transformation is based on the observation that for element-wise functions, the Jacobian
     # matrix is diagonal and thus doing J * v is the same as (v^T J)^T (in practice, we ignore the transpositions)
     # For the complex case, we use hermitian transpose and get (v.conj() J).conj()
-    # So here we are going to re-use the backward formula and replace two things:
+    # So here we are going to reuse the backward formula and replace two things:
     # 1) all occurrences of "grad" with "foo_t.conj()", where foo is the name of the unique differentiable input.
     # 2) all usage of an original input "foo" with its primal value "foo_p".
     # 3) conjugate the final result
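The recipe can be sanity-checked numerically on an element-wise op. Assuming sin's backward formula is `grad * self.cos().conj()` (as in derivatives.yaml), substituting grad -> x_t.conj() and self -> x_p and then conjugating the result recovers the usual JVP x_t * cos(x_p):

    import torch

    x_p = torch.randn(4, dtype=torch.cfloat)  # primal
    x_t = torch.randn(4, dtype=torch.cfloat)  # tangent

    # Backward formula with grad -> x_t.conj() and self -> x_p, then conjugated:
    jvp_from_backward = (x_t.conj() * x_p.cos().conj()).conj()

    # Analytic forward derivative of sin at x_p applied to x_t:
    jvp_expected = x_t * x_p.cos()

    torch.testing.assert_close(jvp_from_backward, jvp_expected)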
@@ -54,5 +54,5 @@ echo "Entering interactive shell at the execution root:"
 # quote escape all the arguments to use as a single input string
 cmd="'$shell' --noprofile --rcfile '$rcfile'"
 
-# run the command in a script psuedo terminal and dump to null
+# run the command in a script pseudo terminal and dump to null
 /usr/bin/script -c "$cmd" -q /dev/null
@@ -24,7 +24,7 @@ ONLY_AVAILABLE_IN_BUCK2 = [
 def filter_attributes(kwgs):
     keys = list(kwgs.keys())
 
-    # drop unncessary attributes
+    # drop unnecessary attributes
     for key in keys:
         if key in IGNORED_ATTRIBUTES or key in ONLY_AVAILABLE_IN_BUCK2:
             kwgs.pop(key)
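For illustration, here is the same filtering logic exercised end to end; the contents of the two attribute lists below are hypothetical stand-ins for the ones defined in the real file:

    IGNORED_ATTRIBUTES = ["licenses"]        # hypothetical contents
    ONLY_AVAILABLE_IN_BUCK2 = ["pkg_deps"]   # hypothetical contents

    def filter_attributes(kwgs):
        keys = list(kwgs.keys())
        # drop unnecessary attributes (mutates the dict in place)
        for key in keys:
            if key in IGNORED_ATTRIBUTES or key in ONLY_AVAILABLE_IN_BUCK2:
                kwgs.pop(key)
        return kwgs

    print(filter_attributes({"name": "lib", "licenses": [], "pkg_deps": []}))
    # -> {'name': 'lib'}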
@@ -83,7 +83,7 @@ def is_bool(arg):
     """Checks if provided instance is a boolean value.
 
     Args:
-        arg: An instance ot check. type: Any
+        arg: An instance to check. type: Any
 
     Returns:
         True for boolean values, False otherwise. rtype: bool
@@ -96,7 +96,7 @@ def is_number(arg):
     """Checks if provided instance is a number value.
 
     Args:
-        arg: An instance ot check. type: Any
+        arg: An instance to check. type: Any
 
     Returns:
         True for number values, False otherwise. rtype: bool
@@ -109,7 +109,7 @@ def is_struct(arg):
     """Checks if provided instance is a struct value.
 
     Args:
-        arg: An instance ot check. type: Any
+        arg: An instance to check. type: Any
 
     Returns:
         True for struct values, False otherwise. rtype: bool
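These predicates appear to live in a Starlark (.bzl) file, so their bodies are not shown in the diff. A rough Python equivalent of the first two, for illustration only (is_struct has no direct Python analogue; in Starlark it would compare against the type of struct()):

    def is_bool(arg):
        """Checks if provided instance is a boolean value."""
        return isinstance(arg, bool)

    def is_number(arg):
        """Checks if provided instance is a number value."""
        # bool subclasses int in Python, so rule it out explicitly.
        return isinstance(arg, (int, float)) and not isinstance(arg, bool)

    assert is_bool(True) and not is_bool(1)
    assert is_number(3.5) and not is_number(True)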
@@ -38,7 +38,7 @@ def get_lib_extension() -> str:
         return "so"
     if sys.platform == "darwin":
         return "dylib"
-    raise RuntimeError(f"Usupported platform {sys.platform}")
+    raise RuntimeError(f"Unsupported platform {sys.platform}")
 
 
 def create_symlinks() -> None:
@@ -78,7 +78,7 @@ def create_build_plan() -> list[tuple[str, str]]:
         if line.startswith(": &&") and line.endswith("&& :"):
             line = line[4:-4]
         line = line.replace("-O2", "-g").replace("-O3", "-g")
-        # Build Metal shaders with debug infomation
+        # Build Metal shaders with debug information
         if "xcrun metal " in line and "-frecord-sources" not in line:
             line += " -frecord-sources -gline-tables-only"
         try:
@@ -68,13 +68,13 @@ from torchgen.selective_build.selector import merge_kernel_metadata
 # used by training, and not just the root operators. All Training ops are
 # also considered for inference, so these are merged into inference ops.
 #
-# 3. Operator Depencency Graph (--dep-graph-yaml-path): A path to the
+# 3. Operator Dependency Graph (--dep-graph-yaml-path): A path to the
 # operator dependency graph used to determine which operators depend on
 # which other operators for correct functioning. This is used for
 # generating the transitive closure of all the operators used by the
 # model based on the root operators when static selective build is used.
 # For tracing based selective build, we don't need to perform this
-# transitive cloure.
+# transitive closure.
 #
 # 4. Model Metadata (--model-name, --model-versions, --model-assets,
 # --model-backends): Self-descriptive. These are used to tell this
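The transitive-closure step described in item 3 is just a reachability walk over the dependency graph. A minimal sketch of the idea (not the script's actual implementation), with a made-up graph:

    from collections import deque

    def transitive_closure(dep_graph, root_ops):
        # Collect every operator reachable from the root operators.
        seen = set(root_ops)
        queue = deque(root_ops)
        while queue:
            op = queue.popleft()
            for dep in dep_graph.get(op, ()):
                if dep not in seen:
                    seen.add(dep)
                    queue.append(dep)
        return seen

    deps = {"aten::softmax": ["aten::exp", "aten::sum"], "aten::sum": ["aten::add"]}
    assert transitive_closure(deps, ["aten::softmax"]) == {
        "aten::softmax", "aten::exp", "aten::sum", "aten::add",
    }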
@@ -133,7 +133,7 @@ def print_file_oriented_report(
     coverage_percentage = print_file_summary(
         covered_summary, total_summary, summary_file
     )
-    # print test condition (interested folder / tests that are successsful or failed)
+    # print test condition (interested folder / tests that are successful or failed)
     print_test_condition(
         tests,
         tests_type,
@@ -204,7 +204,7 @@ def html_oriented_report() -> None:
     # use lcov to generate the coverage report
     build_folder = os.path.join(get_pytorch_folder(), "build")
     coverage_info_file = os.path.join(SUMMARY_FOLDER_DIR, "coverage.info")
-    # generage coverage report -- coverage.info in build folder
+    # generate coverage report -- coverage.info in build folder
     subprocess.check_call(
         [
             "lcov",
@@ -27,7 +27,7 @@ def if_rocm(if_true, if_false = []):
 def if_sycl(if_true, if_false = []):
     """Helper for selecting based on the whether SYCL/ComputeCPP is configured."""
 
-    # NOTE: Tensorflow expects some stange behavior (see their if_sycl) if we
+    # NOTE: Tensorflow expects some strange behavior (see their if_sycl) if we
     # actually plan on supporting this at some point.
     return select({
         "//conditions:default": if_false,
@@ -270,7 +270,7 @@
     {
         "Gb_type": "Caught non-Exception value",
         "Context": "str(exc_instance)",
-        "Explanation": "Except expects to recieve an object of Exception type but received {exc_instance}.",
+        "Explanation": "Except expects to receive an object of Exception type but received {exc_instance}.",
         "Hints": [
             "Dynamo has detected that tracing the code will result in an error when running in eager. Please double check that your code doesn't contain a similar error when actually running eager/uncompiled."
         ]
@@ -330,7 +330,7 @@
         "Hints": [
             "Use `torch._assert()` to raise a hard AssertionError when the check fails. ",
             "This error will propagate back the user code ",
-            "that called the compiled function (i.e. Dynamo wil not trace any exception handling).",
+            "that called the compiled function (i.e. Dynamo will not trace any exception handling).",
             "Remove the assert statement.",
             "Move the assert statement outside of any context managers in order to graph break with ",
             "partial graph compilation (if fullgraph=False).",
@@ -2050,7 +2050,7 @@
     {
         "Gb_type": "torch.distributed package is not available!",
         "Context": "",
-        "Explanation": "The PyTorch package doesn't include torch.distributed when builing from source.",
+        "Explanation": "The PyTorch package doesn't include torch.distributed when building from source.",
         "Hints": [
             "Set USE_DISTRIBUTED=1 to enable it when building PyTorch from source."
         ]
@@ -300,7 +300,7 @@ def build_collectives(
             for _ in range(1, num_coalesced_entries):
                 all_entries[i].pop(k)
         else:
-            # Iterate through all the ranks and check if there is a mis-match for the current entry.
+            # Iterate through all the ranks and check if there is a mismatch for the current entry.
             check_current_entry_match(
                 all_entries,
                 _pg_guids,
@@ -46,7 +46,7 @@ exp = re.compile(r"([\w\-\_]*?)(\d+)$")
 
 def _determine_prefix(files: list[str]) -> str:
     """If the user doesn't specify a prefix, but does pass a dir full of similarly-prefixed files, we should be able to
-    infer the common prefix most of the time. But if we can't confidently infer, just fall back to requring the user
+    infer the common prefix most of the time. But if we can't confidently infer, just fall back to requiring the user
     to specify it
     """
     possible_prefixes: defaultdict[str, set[int]] = defaultdict(set)
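Given the `exp` regex in the hunk header, the inference can work by splitting each file name into a stem and a trailing number and checking that a single stem dominates. A hedged sketch of that idea (the real function's fallback details may differ):

    import re
    from collections import defaultdict

    exp = re.compile(r"([\w\-\_]*?)(\d+)$")

    def determine_prefix(files):
        possible_prefixes = defaultdict(set)
        for name in files:
            if (m := exp.match(name)) is not None:
                possible_prefixes[m.group(1)].add(int(m.group(2)))
        if len(possible_prefixes) != 1:
            raise ValueError("cannot confidently infer a prefix; please specify one")
        return next(iter(possible_prefixes))

    assert determine_prefix(["trace_0", "trace_1", "trace_2"]) == "trace_"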
@@ -312,7 +312,7 @@ def match_coalesced_groups_with_non_p2p(
         {first_rank},
     )
 
-    # Iterate through all the ranks and check if there is a mis-match for the current entry.
+    # Iterate through all the ranks and check if there is a mismatch for the current entry.
     check_current_entry_match(
         all_coalesced_entries,
         _pg_guids,
@@ -14,7 +14,7 @@ Not Yet Implemented
 - TODO- tracebacks aren't implemented
 
 Known Issues
-- Flight Recorder buffer sequence_id information is not sufficient to match collectives and coalseced collectives
+- Flight Recorder buffer sequence_id information is not sufficient to match collectives and coalesced collectives
   unless we have the trace data from the beginning of the program. To enable confident analysis of trace buffers that
   do not start from zero (and to simplify the script's matching logic) we need to add more information to the recorder.
 - Currently, the script omits checking the 'status' of collectives. We can look for the first 'non completed'
@@ -413,7 +413,7 @@ class FileLinter(Generic[PythonFileT], ABC):
         return not results or self.args.fix and all(r.is_edit for r in results)
 
     def _error(self, pf: PythonFileT, result: LintResult) -> None:
-        """Called on files that are unparseable"""
+        """Called on files that are unparsable"""
 
     def _replace(self, pf: PythonFileT) -> tuple[str, list[LintResult]]:
         # Because of recursive replacements, we need to repeat replacing and reparsing
@@ -1,5 +1,6 @@
 coo
 fro
+froms
 hsa
 nd
 optins
@@ -25,7 +25,7 @@ from pathlib import Path
     stem = Path(name).stem
     with NamedTemporaryFile(prefix=stem, suffix='.so', delete=False) as tf:
         tf.write(r)
-        print("torch_deploy registering debug inforation for ", tf.name)
+        print("torch_deploy registering debug information for ", tf.name)
         cmd1 = f"target modules add {tf.name}"
         # print(cmd1)
         lldb.debugger.HandleCommand(cmd1)
@@ -14,7 +14,7 @@ Or if you would like to check out the nightly commit in detached HEAD mode::
     $ ./tools/nightly.py checkout
     $ source venv/bin/activate  # or `& .\venv\Scripts\Activate.ps1` on Windows
 
-Or if you would like to re-use an existing virtual environment, you can pass in
+Or if you would like to reuse an existing virtual environment, you can pass in
 the prefix argument (--prefix)::
 
     $ ./tools/nightly.py checkout -b my-nightly-branch -p my-env
@@ -686,7 +686,7 @@ def _nightly_version(site_dir: Path) -> str:
 
 @timed("Checking out nightly PyTorch")
 def checkout_nightly_version(branch: str | None, site_dir: Path) -> None:
-    """Get's the nightly version and then checks it out."""
+    """Gets the nightly version and then checks it out."""
     nightly_version = _nightly_version(site_dir)
     if branch is None:
         # Detached mode - explicitly use --detach flag
@@ -1,4 +1,4 @@
-"""Tool to fix the nvcc's dependecy file output
+"""Tool to fix the nvcc's dependency file output
 
 Usage: python nvcc_fix_deps.py nvcc [nvcc args]...
 
@@ -62,7 +62,7 @@ def venv(interpreter: str) -> Iterator[str]:
 
 
 class Builder:
-    # The python interpeter that we should be using
+    # The python interpreter that we should be using
     interpreter: str
 
     def __init__(self, interpreter: str) -> None:
@@ -124,7 +124,7 @@ def main() -> None:
     with venv(interpreter) as venv_interpreter:
         builder = Builder(venv_interpreter)
         # clean actually requires setuptools so we need to ensure we
-        # install requriements before
+        # install requirements before
         builder.install_requirements()
         builder.clean()
 
@@ -173,7 +173,7 @@ def save_results(
     all_tests: dict[str, dict[str, int]],
 ) -> None:
     """
-    Save the result to S3, which then gets put into the HUD backened database
+    Save the result to S3, which then gets put into the HUD backend database
     """
     should_be_enabled_tests = {
         name: stats
@@ -1,7 +1,7 @@
 #!/usr/bin/env python3
 """
 A Python script that logging the system-level utilization usage in json format.
-Data collected: CPU, memory, GPU memeory utilzation, and GPU utilization if available.
+Data collected: CPU, memory, GPU memory utilization, and GPU utilization if available.
 
 Usage:
 - To run the script with default data collect time setting, use the following command:
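The docstring describes one JSON record per sample of system utilization. A bare-bones sketch of the CPU/memory part using psutil (an assumption — the real script's dependencies and field names may differ, and GPU stats would need an extra library such as pynvml):

    import json
    import time

    import psutil  # assumed dependency for this sketch

    def sample() -> dict:
        return {
            "time": time.time(),
            "cpu_percent": psutil.cpu_percent(interval=None),
            "memory_percent": psutil.virtual_memory().percent,
        }

    # Emit one JSON line per collection interval.
    print(json.dumps(sample()))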
@@ -54,7 +54,7 @@ class TestDocstringLinter(LinterTestCase):
         grandfather_file = f"{td}/grandfather.json"
         grandfather = f"--grandfather={grandfather_file}"
 
-        # Find some faiures
+        # Find some failures
         run("before.txt", grandfather)
 
         # Rewrite grandfather file
@@ -33,7 +33,7 @@ if TYPE_CHECKING:
 
 
 # All currently running heuristics.
-# To add a heurstic in trial mode, specify the keywork argument `trial_mode=True`.
+# To add a heurstic in trial mode, specify the keyword argument `trial_mode=True`.
 HEURISTICS: list[HeuristicInterface] = [
     PreviouslyFailedInPR(),
     EditedByPR(),
@@ -285,7 +285,7 @@ class ShardedTest:
         if not isinstance(other, ShardedTest):
             raise NotImplementedError
 
-        # This is how the list was implicity sorted when it was a NamedTuple
+        # This is how the list was implicitly sorted when it was a NamedTuple
         if self.name != other.name:
            return self.name < other.name
         if self.shard != other.shard:
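The comment refers to how a NamedTuple sorts: tuples compare field by field, left to right. A small sketch reproducing that order with an explicit `__lt__` (the field set here is illustrative, matching only what the hunk shows, not necessarily the full class):

    from dataclasses import dataclass

    @dataclass(frozen=True)
    class ShardedTest:
        name: str
        shard: int

        def __lt__(self, other: "ShardedTest") -> bool:
            if not isinstance(other, ShardedTest):
                raise NotImplementedError
            # Field-by-field comparison, like the old NamedTuple (tuple) order.
            if self.name != other.name:
                return self.name < other.name
            return self.shard < other.shard

    tests = [ShardedTest("b", 1), ShardedTest("a", 2), ShardedTest("a", 1)]
    assert min(tests) == ShardedTest("a", 1)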