Fixes #112633

Fixed errors relating to pydocstyle in the following files. The remaining errors are not covered in this issue.

`torch/utils/dlpack.py` was not modified, as its errors relate to the function signature in the first line of the docstring, which must be kept as-is for proper Sphinx interpretation.

```python
def from_dlpack(ext_tensor: Any) -> 'torch.Tensor':
    """from_dlpack(ext_tensor) -> Tensor
    .....
    """
```

pydocstyle torch/utils/_contextlib.py --count
before: 4
after: 0

pydocstyle torch/backends/mps/__init__.py --count
before: 8
after: 1

**remaining errors**
```
torch/backends/mps/__init__.py:1 at module level: D104: Missing docstring in public package
```

pydocstyle torch/backends/xeon/run_cpu.py --count
before: 13
after: 1

**remaining errors**
```
torch/backends/xeon/run_cpu.py:864 in public function `main`: D103: Missing docstring in public function
```

pydocstyle torch/backends/cpu/__init__.py --count
before: 2
after: 1

**remaining errors**
```
torch/backends/cpu/__init__.py:1 at module level: D104: Missing docstring in public package
```

pydocstyle torch/utils/cpp_backtrace.py --count
before: 4
after: 1

**remaining errors**
```
torch/utils/cpp_backtrace.py:1 at module level: D100: Missing docstring in public module
```

pydocstyle torch/utils/bundled_inputs.py --count
before: 8
after: 1

**remaining errors**
```
torch/utils/bundled_inputs.py:1 at module level: D100: Missing docstring in public module
```

pydocstyle torch/utils/file_baton.py --count
before: 8
after: 1

**remaining errors**
```
torch/utils/file_baton.py:1 at module level: D100: Missing docstring in public module
```

pydocstyle torch/utils/mobile_optimizer.py --count
before: 6
after: 1

**remaining errors**
```
torch/utils/mobile_optimizer.py:8 in public class `LintCode`: D101: Missing docstring in public class
```

pydocstyle torch/backends/opt_einsum/__init__.py --count
before: 7
after: 5

**remaining errors**
```
torch/backends/opt_einsum/__init__.py:1 at module level: D104: Missing docstring in public package
torch/backends/opt_einsum/__init__.py:67 in public function `set_flags`: D103: Missing docstring in public function
torch/backends/opt_einsum/__init__.py:77 in public function `flags`: D103: Missing docstring in public function
torch/backends/opt_einsum/__init__.py:93 in public class `OptEinsumModule`: D101: Missing docstring in public class
torch/backends/opt_einsum/__init__.py:94 in public method `__init__`: D107: Missing docstring in __init__
```

pydocstyle torch/utils/_device.py --count
before: 9
after: 6

**remaining errors**
```
torch/utils/_device.py:58 in public class `DeviceContext`: D101: Missing docstring in public class
torch/utils/_device.py:59 in public method `__init__`: D107: Missing docstring in __init__
torch/utils/_device.py:62 in public method `__enter__`: D105: Missing docstring in magic method
torch/utils/_device.py:68 in public method `__exit__`: D105: Missing docstring in magic method
torch/utils/_device.py:73 in public method `__torch_function__`: D105: Missing docstring in magic method
torch/utils/_device.py:80 in public function `device_decorator`: D103: Missing docstring in public function
```

pydocstyle torch/utils/_freeze.py --count
before: 15
after: 7

**remaining errors**
```
torch/utils/_freeze.py:77 in public function `indent_msg`: D103: Missing docstring in public function
torch/utils/_freeze.py:89 in public class `FrozenModule`: D101: Missing docstring in public class
torch/utils/_freeze.py:100 in public class `Freezer`: D101: Missing docstring in public class
torch/utils/_freeze.py:101 in public method `__init__`: D107: Missing docstring in __init__
torch/utils/_freeze.py:106 in public method `msg`: D102: Missing docstring in public method
torch/utils/_freeze.py:185 in public method `get_module_qualname`: D102: Missing docstring in public method
torch/utils/_freeze.py:206 in public method `compile_string`: D102: Missing docstring in public method
```

pydocstyle torch/utils/throughput_benchmark.py --count
before: 25
after: 8

**remaining errors**
```
torch/utils/throughput_benchmark.py:1 at module level: D100: Missing docstring in public module
torch/utils/throughput_benchmark.py:27 in public class `ExecutionStats`: D101: Missing docstring in public class
torch/utils/throughput_benchmark.py:28 in public method `__init__`: D107: Missing docstring in __init__
torch/utils/throughput_benchmark.py:33 in public method `latency_avg_ms`: D102: Missing docstring in public method
torch/utils/throughput_benchmark.py:37 in public method `num_iters`: D102: Missing docstring in public method
torch/utils/throughput_benchmark.py:46 in public method `total_time_seconds`: D102: Missing docstring in public method
torch/utils/throughput_benchmark.py:50 in public method `__str__`: D105: Missing docstring in magic method
torch/utils/throughput_benchmark.py:94 in public method `__init__`: D107: Missing docstring in __init__
```

pydocstyle torch/utils/hooks.py --count
before: 14
after: 11

**remaining errors**
```
torch/utils/hooks.py:1 at module level: D100: Missing docstring in public module
torch/utils/hooks.py:23 in public method `__init__`: D107: Missing docstring in __init__
torch/utils/hooks.py:34 in public method `remove`: D102: Missing docstring in public method
torch/utils/hooks.py:44 in public method `__getstate__`: D105: Missing docstring in magic method
torch/utils/hooks.py:50 in public method `__setstate__`: D105: Missing docstring in magic method
torch/utils/hooks.py:64 in public method `__enter__`: D105: Missing docstring in magic method
torch/utils/hooks.py:67 in public method `__exit__`: D105: Missing docstring in magic method
torch/utils/hooks.py:82 in public function `warn_if_has_hooks`: D103: Missing docstring in public function
torch/utils/hooks.py:103 in public method `__init__`: D107: Missing docstring in __init__
torch/utils/hooks.py:188 in public method `setup_input_hook`: D102: Missing docstring in public method
torch/utils/hooks.py:197 in public method `setup_output_hook`: D102: Missing docstring in public method
```

pydocstyle torch/utils/_traceback.py --count
before: 19
after: 14

**remaining errors**
```
torch/utils/_traceback.py:47 in public function `report_compile_source_on_error`: D103: Missing docstring in public function
torch/utils/_traceback.py:160 in public class `CapturedTraceback`: D101: Missing docstring in public class
torch/utils/_traceback.py:163 in public method `__init__`: D107: Missing docstring in __init__
torch/utils/_traceback.py:167 in public method `cleanup`: D102: Missing docstring in public method
torch/utils/_traceback.py:170 in public method `summary`: D102: Missing docstring in public method
torch/utils/_traceback.py:182 in public method `__getstate__`: D105: Missing docstring in magic method
torch/utils/_traceback.py:190 in public method `extract`: D205: 1 blank line required between summary line and description (found 0)
torch/utils/_traceback.py:190 in public method `extract`: D400: First line should end with a period (not 't')
torch/utils/_traceback.py:213 in public method `format`: D205: 1 blank line required between summary line and description (found 0)
torch/utils/_traceback.py:213 in public method `format`: D400: First line should end with a period (not 'f')
torch/utils/_traceback.py:213 in public method `format`: D401: First line should be in imperative mood (perhaps 'Format', not 'Formats')
torch/utils/_traceback.py:224 in public method `format_all`: D200: One-line docstring should fit on one line with quotes (found 3)
torch/utils/_traceback.py:247 in private function `_extract_symbolized_tb`: D205: 1 blank line required between summary line and description (found 0)
torch/utils/_traceback.py:247 in private function `_extract_symbolized_tb`: D400: First line should end with a period (not 'f')
```

pydocstyle torch/utils/mkldnn.py --count
before: 28
after: 26

**remaining errors**
```
torch/utils/mkldnn.py:1 at module level: D100: Missing docstring in public module
torch/utils/mkldnn.py:4 in public class `MkldnnLinear`: D101: Missing docstring in public class
torch/utils/mkldnn.py:5 in public method `__init__`: D107: Missing docstring in __init__
torch/utils/mkldnn.py:19 in public method `__getstate__`: D105: Missing docstring in magic method
torch/utils/mkldnn.py:23 in public method `__setstate__`: D105: Missing docstring in magic method
torch/utils/mkldnn.py:29 in public method `forward`: D102: Missing docstring in public method
torch/utils/mkldnn.py:75 in public class `MkldnnConv1d`: D101: Missing docstring in public class
torch/utils/mkldnn.py:76 in public method `__init__`: D107: Missing docstring in __init__
torch/utils/mkldnn.py:82 in public method `__setstate__`: D105: Missing docstring in magic method
torch/utils/mkldnn.py:88 in public class `MkldnnConv2d`: D101: Missing docstring in public class
torch/utils/mkldnn.py:89 in public method `__init__`: D107: Missing docstring in __init__
torch/utils/mkldnn.py:100 in public method `__setstate__`: D105: Missing docstring in magic method
torch/utils/mkldnn.py:110 in public class `MkldnnConv3d`: D101: Missing docstring in public class
torch/utils/mkldnn.py:111 in public method `__init__`: D107: Missing docstring in __init__
torch/utils/mkldnn.py:122 in public method `__setstate__`: D105: Missing docstring in magic method
torch/utils/mkldnn.py:133 in public class `MkldnnBatchNorm`: D101: Missing docstring in public class
torch/utils/mkldnn.py:136 in public method `__init__`: D107: Missing docstring in __init__
torch/utils/mkldnn.py:155 in public method `__getstate__`: D105: Missing docstring in magic method
torch/utils/mkldnn.py:163 in public method `__setstate__`: D105: Missing docstring in magic method
torch/utils/mkldnn.py:171 in public method `forward`: D102: Missing docstring in public method
torch/utils/mkldnn.py:184 in public class `MkldnnPrelu`: D101: Missing docstring in public class
torch/utils/mkldnn.py:185 in public method `__init__`: D107: Missing docstring in __init__
torch/utils/mkldnn.py:190 in public method `__getstate__`: D105: Missing docstring in magic method
torch/utils/mkldnn.py:194 in public method `__setstate__`: D105: Missing docstring in magic method
torch/utils/mkldnn.py:199 in public method `forward`: D102: Missing docstring in public method
torch/utils/mkldnn.py:205 in public function `to_mkldnn`: D103: Missing docstring in public function
```

pydocstyle torch/utils/weak.py --count
before: 32
after: 30

**remaining errors**
```
torch/utils/weak.py:1 at module level: D100: Missing docstring in public module
torch/utils/weak.py:42 in public class `WeakIdRef`: D101: Missing docstring in public class
torch/utils/weak.py:45 in public method `__init__`: D107: Missing docstring in __init__
torch/utils/weak.py:54 in public method `__call__`: D102: Missing docstring in public method
torch/utils/weak.py:61 in public method `__hash__`: D105: Missing docstring in magic method
torch/utils/weak.py:64 in public method `__eq__`: D105: Missing docstring in magic method
torch/utils/weak.py:84 in public class `WeakIdKeyDictionary`: D101: Missing docstring in public class
torch/utils/weak.py:87 in public method `__init__`: D107: Missing docstring in __init__
torch/utils/weak.py:131 in public method `__delitem__`: D105: Missing docstring in magic method
torch/utils/weak.py:135 in public method `__getitem__`: D105: Missing docstring in magic method
torch/utils/weak.py:138 in public method `__len__`: D105: Missing docstring in magic method
torch/utils/weak.py:145 in public method `__repr__`: D105: Missing docstring in magic method
torch/utils/weak.py:148 in public method `__setitem__`: D105: Missing docstring in magic method
torch/utils/weak.py:151 in public method `copy`: D102: Missing docstring in public method
torch/utils/weak.py:162 in public method `__deepcopy__`: D105: Missing docstring in magic method
torch/utils/weak.py:172 in public method `get`: D102: Missing docstring in public method
torch/utils/weak.py:175 in public method `__contains__`: D105: Missing docstring in magic method
torch/utils/weak.py:182 in public method `items`: D102: Missing docstring in public method
torch/utils/weak.py:189 in public method `keys`: D102: Missing docstring in public method
torch/utils/weak.py:198 in public method `values`: D102: Missing docstring in public method
torch/utils/weak.py:216 in public method `popitem`: D102: Missing docstring in public method
torch/utils/weak.py:224 in public method `pop`: D102: Missing docstring in public method
torch/utils/weak.py:228 in public method `setdefault`: D102: Missing docstring in public method
torch/utils/weak.py:231 in public method `update`: D102: Missing docstring in public method
torch/utils/weak.py:241 in public method `__ior__`: D105: Missing docstring in magic method
torch/utils/weak.py:245 in public method `__or__`: D105: Missing docstring in magic method
torch/utils/weak.py:252 in public method `__ror__`: D105: Missing docstring in magic method
torch/utils/weak.py:262 in public method `__eq__`: D105: Missing docstring in magic method
torch/utils/weak.py:276 in public method `__init__`: D107: Missing docstring in __init__
torch/utils/weak.py:280 in public method `__call__`: D102: Missing docstring in public method
```

@mikaylagawarecki @jbschlosser @svekars

Pull Request resolved: https://github.com/pytorch/pytorch/pull/113311
Approved by: https://github.com/ezyang
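For readers unfamiliar with these warning codes, the sketch below shows the kind of docstring rewrite that clears the D205/D400/D401 class of errors. The function is hypothetical and not taken from this PR; it only illustrates the before/after shape of such a fix.

```python
# Before: fails D205 (no blank line after the summary), D400 (summary does not
# end with a period), and D401 (summary not in imperative mood).
def scale(t, factor):
    """Scales a tensor by a factor
    and returns the result."""
    return t * factor


# After: the summary line is imperative, ends with a period, and is separated
# from the description by a blank line.
def scale(t, factor):
    """Scale a tensor by a factor.

    Returns the scaled tensor.
    """
    return t * factor
```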
		
			
				
	
	
		
torch/utils/_traceback.py · 255 lines · 10 KiB · Python
from types import TracebackType
from typing import List, Optional
import tempfile
import traceback
import contextlib
import inspect
import os.path

# This file contains utilities for ensuring dynamically compile()'d
# code fragments display their line numbers in backtraces.
#
# The constraints:
#
# - We don't have control over the user exception printer (in particular,
#   we cannot assume the linecache trick will work, c.f.
#   https://stackoverflow.com/q/50515651/23845 )
#
# - We don't want to create temporary files every time we compile()
#   some code; file creation should happen lazily only at exception
#   time.  Arguably, you *should* be willing to write out your
#   generated Python code to file system, but in some situations
#   (esp. library code) it would violate user expectation to write
#   to the file system, so we try to avoid it.  In particular, we'd
#   like to keep the files around, so users can open up the files
#   mentioned in the trace; if the file is invisible, we want to
#   avoid clogging up the filesystem.
#
#   If this is not a constraint for you, there is a substantially simpler
#   way to implement the functionality in this PR: instead of using
#   eval/exec directly, just always write a Python file to filesystem
#   and compile that.
#
# - You have control over a context where the compiled code will get
#   executed, so that we can interpose while the stack is unwinding
#   (otherwise, we have no way to interpose on the exception printing
#   process.)
#
# There are two things you have to do to make use of the utilities here:
#
# - When you compile your source code, you must save its string source
#   in its f_globals under the magic name "__compile_source__"
#
# - Before running the compiled code, enter the
#   report_compile_source_on_error() context manager.
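#
# Example usage (an illustrative sketch, not part of the original file; the
# compiled source string and the function name `f` below are made up):
#
#   src = "def f():\n    raise RuntimeError('boom')\n"
#   g = {"__compile_source__": src}
#   exec(compile(src, "<string>", "exec"), g)
#   with report_compile_source_on_error():
#       g["f"]()  # the re-raised traceback points at a real temp file holding src
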
@contextlib.contextmanager
def report_compile_source_on_error():
    try:
        yield
    except Exception as exc:
        tb = exc.__traceback__

        # Walk the traceback, looking for frames that have
        # source attached
        stack = []
        while tb is not None:
            filename = tb.tb_frame.f_code.co_filename
            source = tb.tb_frame.f_globals.get("__compile_source__")

            if filename == "<string>" and source is not None:
                # What black magic are we doing here?  Intuitively, what
                # we would like to do is overwrite the co_filename on any
                # frames that were generated from exec/eval so that they
                # point to a temporary file that has the actual line
                # information, so Python's default error printer can print
                # useful line information on it.
                #
                # Writing out the temporary file is easy.  But overwriting
                # co_filename is not!  You can't modify the code object
                # associated with a frame.  You can, however, reconstruct
                # a traceback with entirely new frames from scratch, so that's
                # what we do.  But there's another problem, which is how to
                # make the frame?
                #
                # The black magic is we make a frankenstein frame and code
                # object which resembles the original frame/code enough so
                # that it will print properly under traceback and the default
                # error printer, but IT IS NOT THE ORIGINAL FRAME (you
                # couldn't, e.g., execute its code with different variables
                # and expect it to work.)

                # Don't delete the temporary file so the user can inspect it
                # TODO: This creates a temporary file for every frame, but we
                # technically only need one per distinct __compile_source__
                with tempfile.NamedTemporaryFile(mode='w', delete=False, suffix=".py") as f:
                    f.write(source)
                # Create a frame.  Python doesn't let you construct
                # FrameType directly, so just make one with compile
                frame = tb.tb_frame
                code = compile('__inspect_currentframe()', f.name, 'eval')
                code = code.replace(co_name=frame.f_code.co_name)
                # Python 3.11 only
                if hasattr(frame.f_code, 'co_linetable'):
                    # We can't copy ALL of the metadata over, because you
                    # can cause Python to segfault this way.  What exactly
                    # do we need?  We need enough information for
                    # traceback to be able to print the exception
                    # correctly.  Code reading Lib/traceback.py reveals
                    # that traceback calls code.co_positions() in order to
                    # get the augmented line/col numbers.  Objects/codeobject.c,
                    # specifically _PyCode_InitAddressRange, reveals that
                    # this iterator is initialized from co_linetable and
                    # co_firstlineno.  So copy these we must!
                    code = code.replace(  # type: ignore[call-arg]
                        co_linetable=frame.f_code.co_linetable,  # type: ignore[attr-defined]
                        co_firstlineno=frame.f_code.co_firstlineno,  # type: ignore[attr-defined]
                    )
                fake_frame = eval(
                    code,
                    frame.f_globals,
                    {
                        **frame.f_locals,
                        '__inspect_currentframe': inspect.currentframe
                    }
                )
                fake_tb = TracebackType(
                    None, fake_frame, tb.tb_lasti, tb.tb_lineno
                )
                stack.append(fake_tb)
            else:
                stack.append(tb)

            tb = tb.tb_next

        # Reconstruct the linked list
        tb_next = None
        for tb in reversed(stack):
            tb.tb_next = tb_next
            tb_next = tb

        raise exc.with_traceback(tb_next)  # noqa: TRY200

def shorten_filename(fn, *, base=None):
    """Shorten a source filepath, with the assumption that torch/ subdirectories don't need to be shown to user."""
    if base is None:
        base = os.path.dirname(os.path.dirname(__file__))
    # Truncate torch/foo.py to foo.py
    try:
        prefix = os.path.commonpath([fn, base])
    except ValueError:
        return fn
    else:
        return fn[len(prefix) + 1:]


def format_frame(frame, *, base=None, line=False):
    """
    Format a FrameSummary in a short way, without printing full absolute path or code.

    The idea is the result fits on a single line.
    """
    extra_line = ""
    if line:
        extra_line = f"{frame.line}  # "
    return f"{extra_line}{shorten_filename(frame.filename, base=base)}:{frame.lineno} in {frame.name}"


def format_traceback_short(tb):
    """Format a TracebackType in a short way, printing only the inner-most frame."""
    return format_frame(traceback.extract_tb(tb)[-1])

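# Illustrative sketch of the output shape (the frame below is hypothetical; the
# exact shortening depends on `base`, which defaults to the torch package dir):
#
#   fs = traceback.FrameSummary("/x/torch/utils/_device.py", 42, "__torch_function__")
#   format_frame(fs, base="/x/torch")  # -> "utils/_device.py:42 in __torch_function__"
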
class CapturedTraceback:
    __slots__ = ['tb', 'skip']

    def __init__(self, tb, skip=0):
        self.tb = tb
        self.skip = skip

    def cleanup(self):
        self.tb = None

    def summary(self):
        import torch._C._profiler

        if self.tb is None:
            # TODO: Maybe indicate that the traceback was elided?
            return traceback.StackSummary()

        return _extract_symbolized_tb(
            torch._C._profiler.symbolize_tracebacks([self.tb])[0],
            self.skip
        )

    def __getstate__(self):
        return (None, {
            'tb': None,  # TB is not pickleable
            'skip': self.skip,
        })

    @staticmethod
    def extract(*, script=False, cpp=False, skip=0):
        """
        Like traceback.extract_stack(), but faster (approximately 20x faster); it
        is fast enough that you can unconditionally log stacks this way as part of
        normal execution.  It returns a torch._C._profiler.CapturedTraceback
        object that must be formatted specially with format_captured_tb.

        By default, this only reports Python backtraces (like extract_stack).  You
        can set the script/cpp kwargs to also turn on TorchScript/C++ trace
        reporting.
        """
        import torch._C._profiler

        if script or cpp:
            assert skip == 0, "skip with script/cpp NYI"

        return CapturedTraceback(
            torch._C._profiler.gather_traceback(python=True, script=script, cpp=cpp),
            # Elide extract() frame if we don't have script/cpp frames.  If
            # we do have those frames, it doesn't work so force zero.
            0 if script or cpp else skip + 1
        )

    def format(self):
        """
        Formats a single torch._C._profiler.CapturedTraceback into a list of
        strings equivalent to the output of traceback.format_list.  Note that if
        you pass it a CapturedTraceback with C++ traces, it is better not to use
        this function and use the batch formatting API format_captured_tbs to
        amortize the cost of symbolization.
        """
        return traceback.format_list(self.summary())

    @staticmethod
    def format_all(tbs):
        """
        Bulk version of CapturedTraceback.format.  Returns a list of list of strings.
        """
        import torch._C._profiler

        # Directly populate tracebacks that already have cached summaries
        rs: List[Optional[List[str]]] = []
        delayed_idxs = []
        for i, tb in enumerate(tbs):
            if tb.tb is None:
                rs.append([])
            else:
                rs.append(None)
                delayed_idxs.append(i)

        # Symbolize the remaining tracebacks in one batch, then format each
        # symbolized traceback directly (no per-entry re-symbolization)
        stbs = torch._C._profiler.symbolize_tracebacks([tbs[i].tb for i in delayed_idxs])
        for i, stb in zip(delayed_idxs, stbs):
            rs[i] = traceback.format_list(_extract_symbolized_tb(stb, tbs[i].skip))

        return rs

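# Example usage (an illustrative sketch, not part of the original file; the
# printed strings depend on the call site):
#
#   tb = CapturedTraceback.extract()      # cheap enough to log unconditionally
#   ...
#   print("".join(tb.format()))           # formatted like traceback.format_list
#   # For many tracebacks (especially with C++ frames), prefer the batch API:
#   #   CapturedTraceback.format_all([tb, other_tb])
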
def _extract_symbolized_tb(tb, skip):
    """
    Given a symbolized traceback from symbolize_tracebacks, return a StackSummary object of
    pre-processed stack trace entries.
    """
    stack = traceback.StackSummary()
    for f in reversed(tb[skip:]):
        stack.append(traceback.FrameSummary(f['filename'], f['line'], f['name']))
    return stack