mirror of https://github.com/pytorch/pytorch.git
synced 2025-10-21 05:34:18 +08:00
Update torch.autograd.graph logging to not print out grad_output (#116523)
Instead of printing the tensor's data, print the tensor's dtype and shape metadata:

```
Executing: <VarMeanBackward0 object at 0x1352d0e20> with grad_outputs: [None,f32[]]
```

This is important in order to avoid doing a CUDA sync, and it also reduces verbosity.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/116523
Approved by: https://github.com/albanD
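For illustration, here is a minimal, hedged sketch of how the same metadata-only log line can be reproduced with the public `torch.autograd.graph.Node.register_prehook` API. The `render` helper below is hypothetical: it only mirrors the format shown above (using full dtype names rather than abbreviations) and is not the code this commit adds.

```python
import torch

x = torch.randn(3, requires_grad=True)
var, mean = torch.var_mean(x)
node = var.grad_fn  # VarMeanBackward0: a single node producing both outputs

def render(g):
    # Hypothetical helper: show dtype/shape metadata only, never tensor data,
    # so no device-to-host copy (and hence no CUDA sync) is triggered.
    return "None" if g is None else f"{g.dtype}[{', '.join(map(str, g.shape))}]"

def prehook(grad_outputs):
    print(f"Executing: {node} with grad_outputs: "
          f"[{','.join(render(g) for g in grad_outputs)}]")

node.register_prehook(prehook)
mean.backward()
# Expected to print something like:
# Executing: <VarMeanBackward0 object at 0x...> with grad_outputs: [None,torch.float32[]]
```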
committed by PyTorch MergeBot
parent 29ae4f22bf
commit 4e666ba011
```diff
@@ -657,9 +657,18 @@ def _register_logging_hooks_on_whole_graph(t_outputs: List[torch.Tensor]):
             yield node
 
-    def prehook(grad_output):
+    def fmt(t):
+        # Avoid circular import
+        from torch.testing._internal.common_utils import dtype_abbrs
+
+        if t is None:
+            return "None"
+        return f"{dtype_abbrs[t.dtype]}[{', '.join(map(str, t.shape))}]"
+
+    def prehook(grad_outputs):
         node = torch._C._current_autograd_node()
-        log_str = f"Executing: {node} with grad_output: {grad_output}"
+        grad_outputs_str = f"[{','.join(fmt(t) for t in grad_outputs)}]"
+        log_str = f"Executing: {node} with grad_outputs: {grad_outputs_str}"
         log.debug(log_str)
 
     handles = []
```
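As a runnable sketch of the `fmt` helper added above: the real code imports the full abbreviation table as `dtype_abbrs` from `torch.testing._internal.common_utils`, whereas the table here is an inlined subset, assumed only to keep the example self-contained.

```python
import torch

# Inlined subset of dtype abbreviations; the diff imports the full table
# from torch.testing._internal.common_utils as dtype_abbrs.
dtype_abbrs = {torch.float32: "f32", torch.float64: "f64", torch.int64: "i64"}

def fmt(t):
    # Render only metadata: a None gradient stays "None", a tensor becomes
    # e.g. "f32[2, 3]" (dtype abbreviation plus shape), with no data access.
    if t is None:
        return "None"
    return f"{dtype_abbrs.get(t.dtype, str(t.dtype))}[{', '.join(map(str, t.shape))}]"

print(fmt(None))              # None
print(fmt(torch.zeros(())))   # f32[]
print(fmt(torch.ones(2, 3)))  # f32[2, 3]
```

Because only `t.dtype` and `t.shape` are read, the tensor's storage is never materialized on the host, which is what lets the logging hook avoid a CUDA sync.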