[BE] fix typos in top-level files (#156067)
Pull Request resolved: https://github.com/pytorch/pytorch/pull/156067
Approved by: https://github.com/malfet
ghstack dependencies: #156066
Committed by: PyTorch MergeBot
Parent: 6c493e2b14
Commit: 013dfeabb4
@@ -1158,7 +1158,6 @@ exclude_patterns = [
'torch/_inductor/autoheuristic/artifacts/**',
# These files are all grandfathered in, feel free to remove from this list
# as necessary
-'*',
'.ci/**',
'.circleci/**',
'.github/**',
@@ -500,7 +500,7 @@ filegroup(
# To achieve finer granularity and make debug easier, caffe2 is split into three libraries:
# ATen, caffe2 and caffe2_for_aten_headers. ATen lib group up source codes under
# aten/ directory and caffe2 contains most files under `caffe2/` directory. Since the
-# ATen lib and the caffe2 lib would depend on each other, `caffe2_for_aten_headers` is splitted
+# ATen lib and the caffe2 lib would depend on each other, `caffe2_for_aten_headers` is split
# out from `caffe2` to avoid dependency cycle.
cc_library(
name = "caffe2_for_aten_headers",
@@ -700,7 +700,7 @@ endif()
if(USE_KLEIDIAI AND CMAKE_C_COMPILER_VERSION)
if(CMAKE_C_COMPILER_VERSION VERSION_LESS 11)
set(USE_KLEIDIAI OFF)
-message(WARNING "Disabling KleidiAI: Requires atleast GCC 11 or Clang 11")
+message(WARNING "Disabling KleidiAI: Requires at least GCC 11 or Clang 11")
endif()
endif()
@@ -1258,7 +1258,7 @@ endif()
add_subdirectory(c10)
add_subdirectory(caffe2)

-# ---[ CMake related files Uninistall option.
+# ---[ CMake related files Uninstall option.
if(NOT TARGET caffe2_uninstall)
configure_file(
${CMAKE_CURRENT_SOURCE_DIR}/cmake/cmake_uninstall.cmake.in
@@ -70,7 +70,7 @@ RUN /opt/conda/bin/conda install -y python=${PYTHON_VERSION}

ARG TARGETPLATFORM

-# INSTALL_CHANNEL whl - release, whl/nightly - nightly, whle/test - test channels
+# INSTALL_CHANNEL whl - release, whl/nightly - nightly, whl/test - test channels
RUN case ${TARGETPLATFORM} in \
"linux/arm64") pip install --extra-index-url https://download.pytorch.org/whl/cpu/ torch torchvision torchaudio ;; \
*) pip install --index-url https://download.pytorch.org/${INSTALL_CHANNEL}/${CUDA_PATH#.}/ torch torchvision torchaudio ;; \
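For context on the install command above: `${CUDA_PATH#.}` is shell parameter expansion that strips a leading dot from the CUDA suffix before it is spliced into the index URL. A minimal Python sketch of the same composition, with hypothetical values standing in for the real Docker build args:

    # Hypothetical values; the real INSTALL_CHANNEL and CUDA_PATH are Docker build args.
    install_channel = "whl/nightly"   # whl = release, whl/nightly = nightly, whl/test = test
    cuda_path = ".cu126"              # assumed to carry a leading dot, hence the '#.' expansion
    index_url = f"https://download.pytorch.org/{install_channel}/{cuda_path.removeprefix('.')}/"
    print(index_url)                  # -> https://download.pytorch.org/whl/nightly/cu126/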
@@ -402,7 +402,7 @@ def get_aten_generated_files(enabled_backends):

# This is tiresome. A better strategy would be to unconditionally
# generate these files, and then only actually COMPILE them depended
-# on the generated set. C'est la vie...
+# on the generated set. C'est la vie... # codespell:ignore vie
if "CPU" in enabled_backends:
src_files.extend(aten_ufunc_generated_cpu_sources())
src_files.extend(aten_ufunc_generated_cpu_kernel_sources())
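The comment above describes the intended pattern: generate the ufunc sources unconditionally, then compile only the subset whose backend is enabled. A rough sketch of that selection step, with illustrative helper and file names (the real logic lives in the Buck macros):

    def select_backend_sources(enabled_backends, generated):
        """Pick generated sources to compile based on enabled backends (illustrative only)."""
        src_files = []
        if "CPU" in enabled_backends:
            src_files.extend(generated.get("CPU", []))
        if "CUDA" in enabled_backends:
            src_files.extend(generated.get("CUDA", []))
        return src_files

    # Example: only CPU is enabled, so only CPU ufunc sources are compiled.
    print(select_backend_sources(
        ["CPU"],
        {"CPU": ["UfuncCPUKernel_add.cpp"], "CUDA": ["UfuncCUDA_add.cu"]},
    ))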
@@ -525,7 +525,7 @@ def copy_template_registration_files(name, apple_sdks = None):

# Ideally, we would run one copy command for a single source directory along
# with all its child directories, but it's somewhat hard to know if a directory
-# is a child of another just bu looking at the metadata (directory relative
+# is a child of another just by looking at the metadata (directory relative
# path) that we currently have since 1 directory could look like a parent of
# another and yet come from a different filegroup() rule.
#
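The comment above makes a subtle point: path metadata alone can make one directory look like a child of another even when the two come from unrelated filegroup() rules. A small illustration (directory names are made up):

    from pathlib import PurePosixPath

    def looks_like_child(child, parent):
        """True if 'child' is lexically nested under 'parent' (no knowledge of owning rules)."""
        return PurePosixPath(parent) in PurePosixPath(child).parents

    # 'ops/metal/kernels' appears to be a child of 'ops', yet the two prefixes could be
    # exported by different filegroup() rules, so lexical nesting alone is not enough.
    print(looks_like_child("ops/metal/kernels", "ops"))  # True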
@@ -776,7 +776,7 @@ def copy_metal(name, apple_sdks = None):

# Metal custom ops currently have to be brought into selective build because they directly reference metal ops instead of
# going through the dispatcher. There is some weird issues with the genrule and these files locations on windows though, so
-# for now we simply skip building them for windows where they very likely arent needed anyway.
+# for now we simply skip building them for windows where they very likely aren't needed anyway.
# Metal MaskRCNN custom op
for full_path in METAL_MASKRCNN_SOURCE_LIST:
path_prefix = paths.dirname(full_path)
@@ -792,7 +792,7 @@ def copy_metal(name, apple_sdks = None):
name = name,
cmd = " && ".join(cmd),
cmd_exe = "@powershell -Command " + ("; ".join(cmd_exe)),
-# due to an obscure bug certain custom ops werent being copied correctly on windows. ARVR also sometimes builds android targets on windows,
+# due to an obscure bug certain custom ops weren't being copied correctly on windows. ARVR also sometimes builds android targets on windows,
# so we just exclude those targets from being copied for those platforms (They end up uncompiled anyway).
outs = select({
"DEFAULT": get_metal_registration_files_outs(),
@@ -1256,11 +1256,11 @@ def define_buck_targets(
extra_flags = {
"fbandroid_compiler_flags": ["-frtti"],
},
-# torch_mobile_deserialize brings in sources neccessary to read a module
+# torch_mobile_deserialize brings in sources necessary to read a module
# which depends on mobile module definition
-# link_whole is enable so that all symbols neccessary for mobile module are compiled
+# link_whole is enable so that all symbols necessary for mobile module are compiled
# instead of only symbols used while loading; this prevents symbol
-# found definied in runtime
+# found defined in runtime
# @lint-ignore BUCKLINT link_whole
link_whole = True,
linker_flags = get_no_as_needed_linker_flag(),
@@ -1376,11 +1376,11 @@ def define_buck_targets(
"torch/csrc/jit/mobile/import.h",
"torch/csrc/jit/mobile/flatbuffer_loader.h",
],
-# torch_mobile_deserialize brings in sources neccessary to read a module
+# torch_mobile_deserialize brings in sources necessary to read a module
# which depends on mobile module definition
-# link_whole is enable so that all symbols neccessary for mobile module are compiled
+# link_whole is enable so that all symbols necessary for mobile module are compiled
# instead of only symbols used while loading; this prevents symbol
-# found definied in runtime
+# found defined in runtime
# @lint-ignore BUCKLINT link_whole
link_whole = True,
linker_flags = get_no_as_needed_linker_flag(),
@@ -1407,9 +1407,9 @@ def define_buck_targets(
exported_headers = [],
compiler_flags = get_pt_compiler_flags(),
exported_preprocessor_flags = get_pt_preprocessor_flags() + (["-DSYMBOLICATE_MOBILE_DEBUG_HANDLE"] if get_enable_eager_symbolication() else []),
-# torch_mobile_core brings in sources neccessary to read and run a module
+# torch_mobile_core brings in sources necessary to read and run a module
# link_whole is enabled so that all symbols linked
-# operators, registerations and other few symbols are need in runtime
+# operators, registrations and other few symbols are need in runtime
# @lint-ignore BUCKLINT link_whole
link_whole = True,
linker_flags = get_no_as_needed_linker_flag(),
@@ -1523,10 +1523,10 @@ def define_buck_targets(
],
compiler_flags = get_pt_compiler_flags(),
exported_preprocessor_flags = get_pt_preprocessor_flags() + ["-DUSE_MOBILE_CLASSTYPE"],
-# torch_mobile_train brings in sources neccessary to read and run a mobile
+# torch_mobile_train brings in sources necessary to read and run a mobile
# and save and load mobile params along with autograd
# link_whole is enabled so that all symbols linked
-# operators, registerations and autograd related symbols are need in runtime
+# operators, registrations and autograd related symbols are need in runtime
# @lint-ignore BUCKLINT link_whole
link_whole = True,
visibility = ["PUBLIC"],
@@ -1548,9 +1548,9 @@ def define_buck_targets(
],
compiler_flags = get_pt_compiler_flags(),
exported_preprocessor_flags = get_pt_preprocessor_flags(),
-# torch brings in all sources neccessary to read and run a mobile module/jit module
+# torch brings in all sources necessary to read and run a mobile module/jit module
# link_whole is enabled so that all symbols linked
-# operators, registerations and other few symbols are need in runtime
+# operators, registrations and other few symbols are need in runtime
# @lint-ignore BUCKLINT link_whole
link_whole = True,
visibility = ["PUBLIC"],
@@ -1575,7 +1575,7 @@ def define_buck_targets(
],
compiler_flags = get_pt_compiler_flags(),
exported_preprocessor_flags = get_pt_preprocessor_flags() + ["-DUSE_MOBILE_CLASSTYPE"],
-# torch_mobile_train_import_data brings in sources neccessary to read a mobile module
+# torch_mobile_train_import_data brings in sources necessary to read a mobile module
# link_whole is enabled so that all symbols linked
# operators other few symbols are need in runtime
# @lint-ignore BUCKLINT link_whole
@@ -1654,10 +1654,10 @@ def define_buck_targets(
],
compiler_flags = get_pt_compiler_flags(),
exported_preprocessor_flags = get_pt_preprocessor_flags() + (["-DSYMBOLICATE_MOBILE_DEBUG_HANDLE"] if get_enable_eager_symbolication() else []),
-# torch_mobile_model_tracer brings in sources neccessary to read and run a jit module
+# torch_mobile_model_tracer brings in sources necessary to read and run a jit module
# and trace the ops
# link_whole is enabled so that all symbols linked
-# operators, registerations and other few symbols are need in runtime
+# operators, registrations and other few symbols are need in runtime
# @lint-ignore BUCKLINT link_whole
link_whole = True,
linker_flags = get_no_as_needed_linker_flag(),
@@ -1842,11 +1842,11 @@ def define_buck_targets(
extra_flags = {
"fbandroid_compiler_flags": ["-frtti"],
},
-# torch_mobile_deserialize brings in sources neccessary to read a module
+# torch_mobile_deserialize brings in sources necessary to read a module
# which depends on mobile module definition
-# link_whole is enable so that all symbols neccessary for mobile module are compiled
+# link_whole is enable so that all symbols necessary for mobile module are compiled
# instead of only symbols used while loading; this prevents symbol
-# found definied in runtime
+# found defined in runtime
# @lint-ignore BUCKLINT link_whole
link_whole = True,
linker_flags = get_no_as_needed_linker_flag(),
@@ -1520,7 +1520,7 @@ aten_cuda_cu_with_sort_by_key_source_list = [
"aten/src/ATen/native/cuda/Unique.cu",
]

-# Followings are source code for xnnpack delegate
+# Following are source code for xnnpack delegate

xnnpack_delegate_serializer_header = [
"torch/csrc/jit/backends/xnnpack/serialization/serializer.h",
@@ -20,7 +20,7 @@ disallow_any_unimported = True
strict = True
implicit_reexport = False

-# do not reenable this:
+# do not re-enable this:
# https://github.com/pytorch/pytorch/pull/60006#issuecomment-866130657
warn_unused_ignores = False
mypy.ini
@@ -17,7 +17,7 @@ follow_imports = normal
local_partial_types = True
enable_error_code = possibly-undefined

-# do not reenable this:
+# do not re-enable this:
# https://github.com/pytorch/pytorch/pull/60006#issuecomment-866130657
warn_unused_ignores = False
@@ -210,7 +210,7 @@ def get_metal_registration_files_outs():

# There is a really weird issue with the arvr windows builds where
# the custom op files are breaking them. See https://fburl.com/za87443c
-# The hack is just to not build them for that platform and pray they arent needed.
+# The hack is just to not build them for that platform and pray they aren't needed.
def get_metal_registration_files_outs_windows():
outs = {}
for file_path in METAL_SOURCE_LIST:
setup.py
@@ -427,7 +427,7 @@ def check_submodules():
end = time.time()
report(f" --- Submodule initialization took {end - start:.2f} sec")
except Exception:
-report(" --- Submodule initalization failed")
+report(" --- Submodule initialization failed")
report("Please run:\n\tgit submodule update --init --recursive")
sys.exit(1)
for folder in folders:
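The hunk above sits inside setup.py's submodule check, which times the initialization and exits with guidance if it fails. A condensed, approximate sketch of that pattern (not the exact setup.py code):

    import subprocess
    import sys
    import time

    def init_submodules():
        start = time.time()
        try:
            subprocess.check_call(["git", "submodule", "update", "--init", "--recursive"])
            print(f" --- Submodule initialization took {time.time() - start:.2f} sec")
        except Exception:
            print(" --- Submodule initialization failed")
            print("Please run:\n\tgit submodule update --init --recursive")
            sys.exit(1)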
@@ -0,0 +1,2 @@
+coo
+Raison