Mirror of https://github.com/pytorch/pytorch.git, synced 2025-10-20 21:14:14 +08:00
Resolve docstring errors in throughput_benchmark.py, weak.py, _traceback.py, file_baton.py, _contextlib.py, _device.py, cpp_backtrace.py, bundled_inputs.py, run_cpu.py, hooks.py, mobile_optimizer.py, _freeze.py, __init__.py, mkldnn.py, dlpack.py (#113311)
Fixes #112633

Fixed pydocstyle errors in the files listed below. The remaining errors are not covered in this issue.

`torch/utils/dlpack.py` was not modified, as its errors relate to the function signature in the first line of the docstring, which must be kept as-is for proper Sphinx interpretation:

```python
def from_dlpack(ext_tensor: Any) -> 'torch.Tensor':
    """from_dlpack(ext_tensor) -> Tensor

    .....
    """
```

`pydocstyle torch/utils/_contextlib.py --count`
before: 4
after: 0

`pydocstyle torch/backends/mps/__init__.py --count`
before: 8
after: 1

**remaining errors**
```
torch/backends/mps/__init__.py:1 at module level: D104: Missing docstring in public package
```

`pydocstyle torch/backends/xeon/run_cpu.py --count`
before: 13
after: 1

**remaining errors**
```
torch/backends/xeon/run_cpu.py:864 in public function `main`: D103: Missing docstring in public function
```

`pydocstyle torch/backends/cpu/__init__.py --count`
before: 2
after: 1

**remaining errors**
```
torch/backends/cpu/__init__.py:1 at module level: D104: Missing docstring in public package
```

`pydocstyle torch/utils/cpp_backtrace.py --count`
before: 4
after: 1

**remaining errors**
```
torch/utils/cpp_backtrace.py:1 at module level: D100: Missing docstring in public module
```

`pydocstyle torch/utils/bundled_inputs.py --count`
before: 8
after: 1

**remaining errors**
```
torch/utils/bundled_inputs.py:1 at module level: D100: Missing docstring in public module
```

`pydocstyle torch/utils/file_baton.py --count`
before: 8
after: 1

**remaining errors**
```
torch/utils/file_baton.py:1 at module level: D100: Missing docstring in public module
```

`pydocstyle torch/utils/mobile_optimizer.py --count`
before: 6
after: 1

**remaining errors**
```
torch/utils/mobile_optimizer.py:8 in public class `LintCode`: D101: Missing docstring in public class
```

`pydocstyle torch/backends/opt_einsum/__init__.py --count`
before: 7
after: 5

**remaining errors**
```
torch/backends/opt_einsum/__init__.py:1 at module level: D104: Missing docstring in public package
torch/backends/opt_einsum/__init__.py:67 in public function `set_flags`: D103: Missing docstring in public function
torch/backends/opt_einsum/__init__.py:77 in public function `flags`: D103: Missing docstring in public function
torch/backends/opt_einsum/__init__.py:93 in public class `OptEinsumModule`: D101: Missing docstring in public class
torch/backends/opt_einsum/__init__.py:94 in public method `__init__`: D107: Missing docstring in __init__
```

`pydocstyle torch/utils/_device.py --count`
before: 9
after: 6

**remaining errors**
```
torch/utils/_device.py:58 in public class `DeviceContext`: D101: Missing docstring in public class
torch/utils/_device.py:59 in public method `__init__`: D107: Missing docstring in __init__
torch/utils/_device.py:62 in public method `__enter__`: D105: Missing docstring in magic method
torch/utils/_device.py:68 in public method `__exit__`: D105: Missing docstring in magic method
torch/utils/_device.py:73 in public method `__torch_function__`: D105: Missing docstring in magic method
torch/utils/_device.py:80 in public function `device_decorator`: D103: Missing docstring in public function
```

`pydocstyle torch/utils/_freeze.py --count`
before: 15
after: 7

**remaining errors**
```
torch/utils/_freeze.py:77 in public function `indent_msg`: D103: Missing docstring in public function
torch/utils/_freeze.py:89 in public class `FrozenModule`: D101: Missing docstring in public class
torch/utils/_freeze.py:100 in public class `Freezer`: D101: Missing docstring in public class
torch/utils/_freeze.py:101 in public method `__init__`: D107: Missing docstring in __init__
torch/utils/_freeze.py:106 in public method `msg`: D102: Missing docstring in public method
torch/utils/_freeze.py:185 in public method `get_module_qualname`: D102: Missing docstring in public method
torch/utils/_freeze.py:206 in public method `compile_string`: D102: Missing docstring in public method
```

`pydocstyle torch/utils/throughput_benchmark.py --count`
before: 25
after: 8

**remaining errors**
```
torch/utils/throughput_benchmark.py:1 at module level: D100: Missing docstring in public module
torch/utils/throughput_benchmark.py:27 in public class `ExecutionStats`: D101: Missing docstring in public class
torch/utils/throughput_benchmark.py:28 in public method `__init__`: D107: Missing docstring in __init__
torch/utils/throughput_benchmark.py:33 in public method `latency_avg_ms`: D102: Missing docstring in public method
torch/utils/throughput_benchmark.py:37 in public method `num_iters`: D102: Missing docstring in public method
torch/utils/throughput_benchmark.py:46 in public method `total_time_seconds`: D102: Missing docstring in public method
torch/utils/throughput_benchmark.py:50 in public method `__str__`: D105: Missing docstring in magic method
torch/utils/throughput_benchmark.py:94 in public method `__init__`: D107: Missing docstring in __init__
```

`pydocstyle torch/utils/hooks.py --count`
before: 14
after: 11

**remaining errors**
```
torch/utils/hooks.py:1 at module level: D100: Missing docstring in public module
torch/utils/hooks.py:23 in public method `__init__`: D107: Missing docstring in __init__
torch/utils/hooks.py:34 in public method `remove`: D102: Missing docstring in public method
torch/utils/hooks.py:44 in public method `__getstate__`: D105: Missing docstring in magic method
torch/utils/hooks.py:50 in public method `__setstate__`: D105: Missing docstring in magic method
torch/utils/hooks.py:64 in public method `__enter__`: D105: Missing docstring in magic method
torch/utils/hooks.py:67 in public method `__exit__`: D105: Missing docstring in magic method
torch/utils/hooks.py:82 in public function `warn_if_has_hooks`: D103: Missing docstring in public function
torch/utils/hooks.py:103 in public method `__init__`: D107: Missing docstring in __init__
torch/utils/hooks.py:188 in public method `setup_input_hook`: D102: Missing docstring in public method
torch/utils/hooks.py:197 in public method `setup_output_hook`: D102: Missing docstring in public method
```

`pydocstyle torch/utils/_traceback.py --count`
before: 19
after: 14

**remaining errors**
```
torch/utils/_traceback.py:47 in public function `report_compile_source_on_error`: D103: Missing docstring in public function
torch/utils/_traceback.py:160 in public class `CapturedTraceback`: D101: Missing docstring in public class
torch/utils/_traceback.py:163 in public method `__init__`: D107: Missing docstring in __init__
torch/utils/_traceback.py:167 in public method `cleanup`: D102: Missing docstring in public method
torch/utils/_traceback.py:170 in public method `summary`: D102: Missing docstring in public method
torch/utils/_traceback.py:182 in public method `__getstate__`: D105: Missing docstring in magic method
torch/utils/_traceback.py:190 in public method `extract`: D205: 1 blank line required between summary line and description (found 0)
torch/utils/_traceback.py:190 in public method `extract`: D400: First line should end with a period (not 't')
torch/utils/_traceback.py:213 in public method `format`: D205: 1 blank line required between summary line and description (found 0)
torch/utils/_traceback.py:213 in public method `format`: D400: First line should end with a period (not 'f')
torch/utils/_traceback.py:213 in public method `format`: D401: First line should be in imperative mood (perhaps 'Format', not 'Formats')
torch/utils/_traceback.py:224 in public method `format_all`: D200: One-line docstring should fit on one line with quotes (found 3)
torch/utils/_traceback.py:247 in private function `_extract_symbolized_tb`: D205: 1 blank line required between summary line and description (found 0)
torch/utils/_traceback.py:247 in private function `_extract_symbolized_tb`: D400: First line should end with a period (not 'f')
```

`pydocstyle torch/utils/mkldnn.py --count`
before: 28
after: 26

**remaining errors**
```
torch/utils/mkldnn.py:1 at module level: D100: Missing docstring in public module
torch/utils/mkldnn.py:4 in public class `MkldnnLinear`: D101: Missing docstring in public class
torch/utils/mkldnn.py:5 in public method `__init__`: D107: Missing docstring in __init__
torch/utils/mkldnn.py:19 in public method `__getstate__`: D105: Missing docstring in magic method
torch/utils/mkldnn.py:23 in public method `__setstate__`: D105: Missing docstring in magic method
torch/utils/mkldnn.py:29 in public method `forward`: D102: Missing docstring in public method
torch/utils/mkldnn.py:75 in public class `MkldnnConv1d`: D101: Missing docstring in public class
torch/utils/mkldnn.py:76 in public method `__init__`: D107: Missing docstring in __init__
torch/utils/mkldnn.py:82 in public method `__setstate__`: D105: Missing docstring in magic method
torch/utils/mkldnn.py:88 in public class `MkldnnConv2d`: D101: Missing docstring in public class
torch/utils/mkldnn.py:89 in public method `__init__`: D107: Missing docstring in __init__
torch/utils/mkldnn.py:100 in public method `__setstate__`: D105: Missing docstring in magic method
torch/utils/mkldnn.py:110 in public class `MkldnnConv3d`: D101: Missing docstring in public class
torch/utils/mkldnn.py:111 in public method `__init__`: D107: Missing docstring in __init__
torch/utils/mkldnn.py:122 in public method `__setstate__`: D105: Missing docstring in magic method
torch/utils/mkldnn.py:133 in public class `MkldnnBatchNorm`: D101: Missing docstring in public class
torch/utils/mkldnn.py:136 in public method `__init__`: D107: Missing docstring in __init__
torch/utils/mkldnn.py:155 in public method `__getstate__`: D105: Missing docstring in magic method
torch/utils/mkldnn.py:163 in public method `__setstate__`: D105: Missing docstring in magic method
torch/utils/mkldnn.py:171 in public method `forward`: D102: Missing docstring in public method
torch/utils/mkldnn.py:184 in public class `MkldnnPrelu`: D101: Missing docstring in public class
torch/utils/mkldnn.py:185 in public method `__init__`: D107: Missing docstring in __init__
torch/utils/mkldnn.py:190 in public method `__getstate__`: D105: Missing docstring in magic method
torch/utils/mkldnn.py:194 in public method `__setstate__`: D105: Missing docstring in magic method
torch/utils/mkldnn.py:199 in public method `forward`: D102: Missing docstring in public method
torch/utils/mkldnn.py:205 in public function `to_mkldnn`: D103: Missing docstring in public function
```

`pydocstyle torch/utils/weak.py --count`
before: 32
after: 30

**remaining errors**
```
torch/utils/weak.py:1 at module level: D100: Missing docstring in public module
torch/utils/weak.py:42 in public class `WeakIdRef`: D101: Missing docstring in public class
torch/utils/weak.py:45 in public method `__init__`: D107: Missing docstring in __init__
torch/utils/weak.py:54 in public method `__call__`: D102: Missing docstring in public method
torch/utils/weak.py:61 in public method `__hash__`: D105: Missing docstring in magic method
torch/utils/weak.py:64 in public method `__eq__`: D105: Missing docstring in magic method
torch/utils/weak.py:84 in public class `WeakIdKeyDictionary`: D101: Missing docstring in public class
torch/utils/weak.py:87 in public method `__init__`: D107: Missing docstring in __init__
torch/utils/weak.py:131 in public method `__delitem__`: D105: Missing docstring in magic method
torch/utils/weak.py:135 in public method `__getitem__`: D105: Missing docstring in magic method
torch/utils/weak.py:138 in public method `__len__`: D105: Missing docstring in magic method
torch/utils/weak.py:145 in public method `__repr__`: D105: Missing docstring in magic method
torch/utils/weak.py:148 in public method `__setitem__`: D105: Missing docstring in magic method
torch/utils/weak.py:151 in public method `copy`: D102: Missing docstring in public method
torch/utils/weak.py:162 in public method `__deepcopy__`: D105: Missing docstring in magic method
torch/utils/weak.py:172 in public method `get`: D102: Missing docstring in public method
torch/utils/weak.py:175 in public method `__contains__`: D105: Missing docstring in magic method
torch/utils/weak.py:182 in public method `items`: D102: Missing docstring in public method
torch/utils/weak.py:189 in public method `keys`: D102: Missing docstring in public method
torch/utils/weak.py:198 in public method `values`: D102: Missing docstring in public method
torch/utils/weak.py:216 in public method `popitem`: D102: Missing docstring in public method
torch/utils/weak.py:224 in public method `pop`: D102: Missing docstring in public method
torch/utils/weak.py:228 in public method `setdefault`: D102: Missing docstring in public method
torch/utils/weak.py:231 in public method `update`: D102: Missing docstring in public method
torch/utils/weak.py:241 in public method `__ior__`: D105: Missing docstring in magic method
torch/utils/weak.py:245 in public method `__or__`: D105: Missing docstring in magic method
torch/utils/weak.py:252 in public method `__ror__`: D105: Missing docstring in magic method
torch/utils/weak.py:262 in public method `__eq__`: D105: Missing docstring in magic method
torch/utils/weak.py:276 in public method `__init__`: D107: Missing docstring in __init__
torch/utils/weak.py:280 in public method `__call__`: D102: Missing docstring in public method
```

@mikaylagawarecki @jbschlosser @svekars

Pull Request resolved: https://github.com/pytorch/pytorch/pull/113311
Approved by: https://github.com/ezyang
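Nearly every change in the diff below applies the same three pydocstyle fixes: rewrite the docstring summary in the imperative mood (D401), end it with a period (D400), and separate it from any further description with a blank line (D205). A minimal sketch of the before/after pattern, using a hypothetical function rather than one from this PR:

```python
# Before: fails D401 (summary not imperative), D400 (no trailing
# period), and D205 (no blank line after the summary).
def refresh_cache():  # hypothetical example, not from this PR
    """Returns the cached value
    after refreshing it when it has gone stale."""


# After: imperative one-line summary ending in a period, then a
# blank line before the extended description.
def refresh_cache():
    """Return the cached value.

    The value is refreshed first when it has gone stale.
    """
```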
committed by: PyTorch MergeBot
parent: e100ff42fd
commit: 7f9fafed53
--- a/torch/backends/cpu/__init__.py
+++ b/torch/backends/cpu/__init__.py
@@ -6,7 +6,7 @@ __all__ = [
 
 
 def get_cpu_capability() -> str:
-    r"""Returns cpu capability as a string value.
+    r"""Return cpu capability as a string value.
 
     Possible values:
     - "DEFAULT"
--- a/torch/backends/mps/__init__.py
+++ b/torch/backends/mps/__init__.py
@@ -7,22 +7,24 @@ __all__ = ["is_built", "is_available", "is_macos13_or_newer"]
 
 
 def is_built() -> bool:
-    r"""Returns whether PyTorch is built with MPS support. Note that this
-    doesn't necessarily mean MPS is available; just that if this PyTorch
-    binary were run a machine with working MPS drivers and devices, we
-    would be able to use it."""
+    r"""Return whether PyTorch is built with MPS support.
+
+    Note that this doesn't necessarily mean MPS is available; just that
+    if this PyTorch binary were run a machine with working MPS drivers
+    and devices, we would be able to use it.
+    """
     return torch._C._has_mps
 
 
 @_lru_cache
 def is_available() -> bool:
-    r"""Returns a bool indicating if MPS is currently available."""
+    r"""Return a bool indicating if MPS is currently available."""
     return torch._C._mps_is_available()
 
 
 @_lru_cache
 def is_macos13_or_newer(minor: int = 0) -> bool:
-    r"""Returns a bool indicating whether MPS is running on MacOS 13 or newer."""
+    r"""Return a bool indicating whether MPS is running on MacOS 13 or newer."""
     return torch._C._mps_is_on_macos_13_or_newer(minor)
 
 
@@ -30,7 +32,7 @@ _lib = None
 
 
 def _init():
-    r"""Register prims as implementation of var_mean and group_norm"""
+    r"""Register prims as implementation of var_mean and group_norm."""
     global _lib
     if is_built() is False or _lib is not None:
         return
--- a/torch/backends/opt_einsum/__init__.py
+++ b/torch/backends/opt_einsum/__init__.py
@@ -14,12 +14,12 @@ except ImportError:
 
 @_lru_cache
 def is_available() -> bool:
-    r"""Returns a bool indicating if opt_einsum is currently available."""
+    r"""Return a bool indicating if opt_einsum is currently available."""
     return _opt_einsum is not None
 
 
 def get_opt_einsum() -> Any:
-    r"""Returns the opt_einsum package if opt_einsum is currently available, else None."""
+    r"""Return the opt_einsum package if opt_einsum is currently available, else None."""
     return _opt_einsum
 
 
--- a/torch/backends/xeon/run_cpu.py
+++ b/torch/backends/xeon/run_cpu.py
@@ -1,5 +1,6 @@
 """
 This is a script for launching PyTorch inference on Intel(R) Xeon(R) Scalable Processors with optimal configurations.
+
 Single instance inference, multi-instance inference are enabled.
 
 Note: term "instance" here doesn't refer to a cloud instance. This script is executed as a single process. It invokes
@@ -140,9 +141,7 @@ logger = logging.getLogger(__name__)
 
 
 class _CPUinfo:
-    """
-    Get CPU information, such as cores list and NUMA information.
-    """
+    """Get CPU information, such as cores list and NUMA information."""
 
     def __init__(self, test_input=""):
         self.cpuinfo = []
@@ -229,7 +228,9 @@ class _CPUinfo:
 
     def numa_aware_check(self, core_list):
         """
-        Check whether all cores in core_list are in the same NUMA node. cross NUMA will reduce performance.
+        Check whether all cores in core_list are in the same NUMA node.
+
+        Cross NUMA will reduce performance.
         We strongly advice to not use cores on different nodes.
         """
         cores_numa_map = self.logical_core_node_map
@@ -254,9 +255,7 @@ instance. Alternatively, please use --skip-cross-node-cores knob.",
 
 
 class _Launcher:
-    r"""
-    Class for launcher
-    """
+    r"""Class for launcher."""
 
     msg_lib_notfound = f"Unable to find the {{0}} library file lib{{1}}.so in $CONDA_PREFIX/lib or $VIRTUAL_ENV/lib \
 or /.local/lib/ or /usr/local/lib/ or /usr/local/lib64/ or /usr/lib or /usr/lib64 or \
@@ -266,9 +265,7 @@ or /.local/lib/ or /usr/local/lib/ or /usr/local/lib64/ or /usr/lib or /usr/lib6
         self.cpuinfo = _CPUinfo()
 
     def add_lib_preload(self, lib_type):
-        """
-        Enable TCMalloc/JeMalloc/intel OpenMP
-        """
+        """Enable TCMalloc/JeMalloc/intel OpenMP."""
         library_paths = []
         if "CONDA_PREFIX" in os.environ:
             library_paths.append(f"{os.environ['CONDA_PREFIX']}/lib")
@@ -324,6 +321,7 @@ or /.local/lib/ or /usr/local/lib/ or /usr/local/lib64/ or /usr/lib or /usr/lib6
     ):
         """
         Enable TCMalloc/JeMalloc with LD_PRELOAD and set configuration for JeMalloc.
+
         By default, PTMalloc will be used for PyTorch, but TCMalloc and JeMalloc can get better
         memory reuse and reduce page fault to improve performance.
         """
@@ -405,6 +403,7 @@ Value applied: %s. Value ignored: %s",
     ):
         """
         Set multi-thread configuration and enable Intel openMP and TCMalloc/JeMalloc.
+
         By default, GNU openMP and PTMalloc are used in PyTorch. but Intel openMP and TCMalloc/JeMalloc are better alternatives
         to get performance benefit.
         """
@@ -816,7 +815,8 @@ def _add_kmp_iomp_params(parser):
 
 def create_args(parser=None):
     """
-    Helper function parsing the command line options
+    Parse the command line options.
+
     @retval ArgumentParser
     """
     parser.add_argument(
--- a/torch/utils/_contextlib.py
+++ b/torch/utils/_contextlib.py
@@ -68,8 +68,9 @@ def _wrap_generator(ctx_factory, func):
 
 def context_decorator(ctx, func):
     """
-    Like contextlib.ContextDecorator, but:
+    Like contextlib.ContextDecorator.
 
+    But with the following differences:
     1. Is done by wrapping, rather than inheritance, so it works with context
        managers that are implemented from C and thus cannot easily inherit from
        Python classes
@@ -81,7 +82,6 @@ def context_decorator(ctx, func):
     be a multi-shot context manager that can be directly invoked multiple times)
     or a callable that produces a context manager.
     """
-
     assert not (callable(ctx) and hasattr(ctx, '__enter__')), (
         f"Passed in {ctx} is both callable and also a valid context manager "
         "(has __enter__), making it ambiguous which interface to use. If you "
@@ -118,7 +118,7 @@ def context_decorator(ctx, func):
 
 
 class _DecoratorContextManager:
-    """Allow a context manager to be used as a decorator"""
+    """Allow a context manager to be used as a decorator."""
 
     def __call__(self, orig_func: F) -> F:
         if inspect.isclass(orig_func):
@@ -144,7 +144,7 @@ class _DecoratorContextManager:
 
 
 class _NoParamDecoratorContextManager(_DecoratorContextManager):
-    """Allow a context manager to be used as a decorator without parentheses"""
+    """Allow a context manager to be used as a decorator without parentheses."""
 
     def __new__(cls, orig_func=None):
         if orig_func is None:
--- a/torch/utils/_device.py
+++ b/torch/utils/_device.py
@@ -82,9 +82,9 @@ def device_decorator(device, func):
 
 def set_device(device):
     """
-    Decorator which sets the default device inside of the wrapped
-    function. If you would like to use this as a context manager,
-    use device as a context manager directly, e.g.,
-    ``with torch.device(device)``.
+    Set the default device inside of the wrapped function by decorating it with this function.
+
+    If you would like to use this as a context manager, use device as a
+    context manager directly, e.g., ``with torch.device(device)``.
     """
     return lambda func: device_decorator(torch.device(device), func)
--- a/torch/utils/_freeze.py
+++ b/torch/utils/_freeze.py
@@ -117,8 +117,9 @@ class Freezer:
 
     def write_bytecode(self, install_root):
         """
-        Write the `.c` files containing the frozen bytecode. Shard frozen
-        modules evenly across the files.
+        Write the `.c` files containing the frozen bytecode.
+
+        Shard frozen modules evenly across the files.
         """
         bytecode_file_names = [
             f"bytecode_{i}.c" for i in range(NUM_BYTECODE_FILES)
@@ -132,10 +133,7 @@ class Freezer:
         f.close()
 
     def write_main(self, install_root, oss, symbol_name):
-        """
-        Write the `main.c` file containing a table enumerating all the
-        frozen modules.
-        """
+        """Write the `main.c` file containing a table enumerating all the frozen modules."""
         with open(os.path.join(install_root, "main.c"), "w") as outfp:
             outfp.write(MAIN_INCLUDES)
             for m in self.frozen_modules:
@@ -150,9 +148,7 @@ class Freezer:
         outfp.write(MAIN_SUFFIX)
 
     def write_frozen(self, m: FrozenModule, outfp):
-        """
-        Write a single frozen module's bytecode out to a C variable.
-        """
+        """Write a single frozen module's bytecode out to a C variable."""
         outfp.write(f"unsigned char {m.c_name}[] = {{")
         for i in range(0, len(m.bytecode), 16):
             outfp.write("\n\t")
@@ -161,7 +157,7 @@ class Freezer:
         outfp.write("\n};\n")
 
     def compile_path(self, path: Path, top_package_path: Path):
-        """Generic entry point for compiling a Path object."""
+        """Entry point for compiling a Path object."""
         if path.is_dir():
             self.compile_package(path, top_package_path)
         else:
@@ -220,8 +216,9 @@ class Freezer:
     @indent_msg
     def compile_file(self, path: Path, top_package_path: Path):
         """
-        Compile a Python source file to frozen bytecode. Append the result to
-        `self.frozen_modules`.
+        Compile a Python source file to frozen bytecode.
+
+        Append the result to `self.frozen_modules`.
         """
         assert path.is_file()
         if path.suffix != ".py":
--- a/torch/utils/_traceback.py
+++ b/torch/utils/_traceback.py
@@ -131,10 +131,7 @@ def report_compile_source_on_error():
                 raise exc.with_traceback(tb_next)  # noqa: TRY200
 
 def shorten_filename(fn, *, base=None):
-    """
-    Shorten a source filepath, under the assumption that anything under torch/
-    directory is "obvious" and doesn't need to be shown to user.
-    """
+    """Shorten a source filepath, with the assumption that torch/ subdirectories don't need to be shown to user."""
     if base is None:
         base = os.path.dirname(os.path.dirname(__file__))
     # Truncate torch/foo.py to foo.py
@@ -147,8 +144,9 @@ def shorten_filename(fn, *, base=None):
 
 def format_frame(frame, *, base=None, line=False):
     """
-    Format a FrameSummary in a short way, without printing full absolute path
-    or code. The idea is the result fits on a single line.
+    Format a FrameSummary in a short way, without printing full absolute path or code.
+
+    The idea is the result fits on a single line.
     """
     extra_line = ""
     if line:
@@ -156,9 +154,7 @@ def format_frame(frame, *, base=None, line=False):
     return f"{extra_line}{shorten_filename(frame.filename, base=base)}:{frame.lineno} in {frame.name}"
 
 def format_traceback_short(tb):
-    """
-    Format a TracebackType in a short way, printing only the inner-most frame.
-    """
+    """Format a TracebackType in a short way, printing only the inner-most frame."""
     return format_frame(traceback.extract_tb(tb)[-1])
 
 class CapturedTraceback:
--- a/torch/utils/bundled_inputs.py
+++ b/torch/utils/bundled_inputs.py
@@ -11,25 +11,26 @@ T = TypeVar("T")
 MAX_RAW_TENSOR_SIZE = 16
 
 class InflatableArg(NamedTuple):
-    """ Helper type for bundled inputs.
+    """Helper type for bundled inputs.
 
-        'value' is the compressed/deflated input that is stored in the model. Value
-        must be of the same type as the argument to the function that it is a deflated
-        input for.
+    'value' is the compressed/deflated input that is stored in the model. Value
+    must be of the same type as the argument to the function that it is a deflated
+    input for.
 
-        'fmt' is a formatable code string that is executed to inflate the compressed data into
-        the appropriate input. It can use 'value' as an input to the format str. It must result
-        in a value of the same type as 'value'.
+    'fmt' is a formatable code string that is executed to inflate the compressed data into
+    the appropriate input. It can use 'value' as an input to the format str. It must result
+    in a value of the same type as 'value'.
 
-        'fmt_fn' is a formatable function code string that is executed to inflate the compressed
-        data into the appropriate input. It must result in a value of the same type as 'value'.
-        The function name should be the formatable part of the string.
+    'fmt_fn' is a formatable function code string that is executed to inflate the compressed
+    data into the appropriate input. It must result in a value of the same type as 'value'.
+    The function name should be the formatable part of the string.
+
     Note: Only top level InflatableArgs can be inflated. i.e. you cannot place
     an inflatable arg inside of some other structure. You should instead create
     an inflatable arg such that the fmt code string returns the full structure
     of your input.
     """
 
     value: Any
     fmt: str = "{}"
     fmt_fn: str = ""
@@ -42,8 +43,9 @@ def bundle_inputs(
     *,
     _receive_inflate_expr: Optional[List[str]] = None,
 ) -> torch.jit.ScriptModule:
-    """Creates and returns a copy of the specified model with inputs attached. The original model is
-    not mutated or changed in any way.
+    """Create and return a copy of the specified model with inputs attached.
+
+    The original model is not mutated or changed in any way.
 
     Models with bundled inputs can be invoked in a uniform manner by
     benchmarking and code coverage tools.
@@ -129,7 +131,7 @@ def augment_model_with_bundled_inputs(
     info: Optional[List[str]] = None,  # Optional argument to provide info about forward or its inputs
     skip_size_check=False,
 ) -> None:
-    """ Add bundled sample inputs to a model for the forward function.
+    """Add bundled sample inputs to a model for the forward function.
 
     Models with bundled inputs can be invoked in a uniform manner by
     benchmarking and code coverage tools.
@@ -159,7 +161,6 @@ def augment_model_with_bundled_inputs(
     - `inputs` is a list of inputs of form List[Tuple[Any, ...]]. A list of tuples where the elements
       of each tuple are the args that make up one input.
     """
-
     if not isinstance(model, torch.jit.ScriptModule):
         raise Exception("Only ScriptModule is supported.")
 
--- a/torch/utils/cpp_backtrace.py
+++ b/torch/utils/cpp_backtrace.py
@@ -2,10 +2,10 @@ from torch._C import _get_cpp_backtrace
 
 def get_cpp_backtrace(frames_to_skip=0, maximum_number_of_frames=64) -> str:
     r"""
-    Returns a string containing the C++ stack trace of the current thread.
+    Return a string containing the C++ stack trace of the current thread.
 
     Args:
         frames_to_skip (int): the number of frames to skip from the top of the stack
         maximum_number_of_frames (int): the maximum number of frames to return
     """
     return _get_cpp_backtrace(frames_to_skip, maximum_number_of_frames)
--- a/torch/utils/file_baton.py
+++ b/torch/utils/file_baton.py
@@ -3,28 +3,28 @@ import time
 
 
 class FileBaton:
-    '''A primitive, file-based synchronization utility.'''
+    """A primitive, file-based synchronization utility."""
 
     def __init__(self, lock_file_path, wait_seconds=0.1):
-        '''
-        Creates a new :class:`FileBaton`.
+        """
+        Create a new :class:`FileBaton`.
 
         Args:
             lock_file_path: The path to the file used for locking.
            wait_seconds: The seconds to periodically sleep (spin) when
                calling ``wait()``.
-        '''
+        """
         self.lock_file_path = lock_file_path
         self.wait_seconds = wait_seconds
         self.fd = None
 
     def try_acquire(self):
-        '''
-        Tries to atomically create a file under exclusive access.
+        """
+        Try to atomically create a file under exclusive access.
 
         Returns:
             True if the file could be created, else False.
-        '''
+        """
         try:
             self.fd = os.open(self.lock_file_path, os.O_CREAT | os.O_EXCL)
             return True
@@ -32,17 +32,17 @@ class FileBaton:
             return False
 
     def wait(self):
-        '''
+        """
         Periodically sleeps for a certain amount until the baton is released.
 
         The amount of time slept depends on the ``wait_seconds`` parameter
         passed to the constructor.
-        '''
+        """
         while os.path.exists(self.lock_file_path):
             time.sleep(self.wait_seconds)
 
     def release(self):
-        '''Releases the baton and removes its file.'''
+        """Release the baton and remove its file."""
         if self.fd is not None:
             os.close(self.fd)
--- a/torch/utils/hooks.py
+++ b/torch/utils/hooks.py
@@ -70,7 +70,8 @@ class RemovableHandle:
 
 def unserializable_hook(f):
     """
-    Decorator which marks a function as an unserializable hook.
+    Mark a function as an unserializable hook with this decorator.
+
     This suppresses warnings that would otherwise arise if you attempt
     to serialize a tensor that has a hook.
     """
@@ -91,6 +92,7 @@ def warn_if_has_hooks(tensor):
 class BackwardHook:
     """
     A wrapper class to implement nn.Module backward hooks.
+
     It handles:
     - Ignoring non-Tensor inputs and replacing them by None before calling the user hook
     - Generating the proper Node to capture a set of Tensor's gradients
--- a/torch/utils/mkldnn.py
+++ b/torch/utils/mkldnn.py
@@ -34,7 +34,8 @@ class MkldnnLinear(torch.jit.ScriptModule):
 
 
 class _MkldnnConvNd(torch.jit.ScriptModule):
-    """Common base of MkldnnConv1d and MkldnnConv2d"""
+    """Common base of MkldnnConv1d and MkldnnConv2d."""
+
     __constants__ = ['stride', 'padding', 'dilation', 'groups']
 
     def __init__(self, dense_module):
--- a/torch/utils/mobile_optimizer.py
+++ b/torch/utils/mobile_optimizer.py
@@ -1,6 +1,4 @@
-"""
-This module contains utility method for mobile model optimization and lint.
-"""
+"""This module contains utility method for mobile model optimization and lint."""
 
 import torch
 from enum import Enum
@@ -19,6 +17,8 @@ def optimize_for_mobile(
         preserved_methods: Optional[List[AnyStr]] = None,
         backend: str = 'CPU') -> torch.jit.RecursiveScriptModule:
     """
+    Optimize a torch script module for mobile deployment.
+
     Args:
         script_module: An instance of torch script module with type of ScriptModule.
         optimization_blocklist: A set with type of MobileOptimizerType. When set is not passed,
@@ -77,8 +77,10 @@ def optimize_for_mobile(
 
 def generate_mobile_module_lints(script_module: torch.jit.ScriptModule):
     """
+    Generate a list of lints for a given torch script module.
+
     Args:
-        script_module: An instance of torch script module with type of ScriptModule
+        script_module: An instance of torch script module with type of ScriptModule.
 
     Returns:
         lint_map: A list of dictionary that contains modules lints
--- a/torch/utils/throughput_benchmark.py
+++ b/torch/utils/throughput_benchmark.py
@@ -3,7 +3,7 @@ import torch._C
 
 
 def format_time(time_us=None, time_ms=None, time_s=None):
-    '''Defines how to format time'''
+    """Define time formatting."""
     assert sum([time_us is not None, time_ms is not None, time_s is not None]) == 1
 
     US_IN_SECOND = 1e6
@@ -39,9 +39,7 @@ class ExecutionStats:
 
     @property
     def iters_per_second(self):
-        '''
-        Returns total number of iterations per second across all calling threads
-        '''
+        """Return total number of iterations per second across all calling threads."""
         return self.num_iters / self.total_time_seconds
 
     @property
@@ -59,13 +57,14 @@ class ExecutionStats:
 
 
 class ThroughputBenchmark:
-    '''
-    This class is a wrapper around a c++ component throughput_benchmark::ThroughputBenchmark
-    responsible for executing a PyTorch module (nn.Module or ScriptModule)
-    under an inference server like load. It can emulate multiple calling threads
-    to a single module provided. In the future we plan to enhance this component
-    to support inter and intra-op parallelism as well as multiple models
-    running in a single process.
+    """
+    This class is a wrapper around a c++ component throughput_benchmark::ThroughputBenchmark.
+
+    This wrapper on the throughput_benchmark::ThroughputBenchmark component is responsible
+    for executing a PyTorch module (nn.Module or ScriptModule) under an inference
+    server like load. It can emulate multiple calling threads to a single module
+    provided. In the future we plan to enhance this component to support inter and
+    intra-op parallelism as well as multiple models running in a single process.
 
     Please note that even though nn.Module is supported, it might incur an overhead
     from the need to hold GIL every time we execute Python code or pass around
@@ -90,8 +89,7 @@ class ThroughputBenchmark:
     ... )
     >>> print("Avg latency (ms): {}".format(stats.latency_avg_ms))
     >>> print("Number of iterations: {}".format(stats.num_iters))
-
-    '''
+    """
 
     def __init__(self, module):
         if isinstance(module, torch.jit.ScriptModule):
@@ -100,21 +98,23 @@ class ThroughputBenchmark:
             self._benchmark = torch._C.ThroughputBenchmark(module)
 
     def run_once(self, *args, **kwargs):
-        '''
+        """
         Given input id (input_idx) run benchmark once and return prediction.
+
         This is useful for testing that benchmark actually runs the module you
         want it to run. input_idx here is an index into inputs array populated
         by calling add_input() method.
-        '''
+        """
         return self._benchmark.run_once(*args, **kwargs)
 
     def add_input(self, *args, **kwargs):
-        '''
-        Store a single input to a module into the benchmark memory and keep it
-        there. During the benchmark execution every thread is going to pick up a
+        """
+        Store a single input to a module into the benchmark memory and keep it there.
+
+        During the benchmark execution every thread is going to pick up a
         random input from the all the inputs ever supplied to the benchmark via
         this function.
-        '''
+        """
         self._benchmark.add_input(*args, **kwargs)
 
     def benchmark(
@@ -123,7 +123,9 @@ class ThroughputBenchmark:
             num_warmup_iters=10,
             num_iters=100,
             profiler_output_path=""):
-        '''
+        """
+        Run a benchmark on the module.
+
         Args:
             num_warmup_iters (int): Warmup iters are used to make sure we run a module
                 a few times before actually measuring things. This way we avoid cold
@@ -147,7 +149,7 @@ class ThroughputBenchmark:
         It currently has two fields:
         - num_iters - number of actual iterations the benchmark have made
        - avg_latency_ms - average time it took to infer on one input example in milliseconds
-        '''
+        """
         config = torch._C.BenchmarkConfig()
         config.num_calling_threads = num_calling_threads
         config.num_warmup_iters = num_warmup_iters
|
||||
|
||||
|
||||
class TensorWeakRef:
|
||||
"""
|
||||
Wrapper around a weak ref of a Tensor that handles the _fix_weakref() call required
|
||||
when unwrapping a Tensor weakref.
|
||||
"""
|
||||
"""Wrapper around a weak ref of a Tensor that handles the _fix_weakref() call required when unwrapping a Tensor weakref."""
|
||||
|
||||
ref: WeakRef[Tensor]
|
||||
|
||||
|