mirror of https://github.com/pytorch/pytorch.git (synced 2025-10-20 21:14:14 +08:00)
Put a compile lock around backward compile (#140626)
Summary: https://fb.workplace.com/groups/1286739428954016/posts/1370274947267130

Test Plan:
```
hg up b5b5adce34
vizard_projects/ml_depth/scripts/run_mld.sh
```
used to crash, no longer crashes

Differential Revision: D65913100

Pull Request resolved: https://github.com/pytorch/pytorch/pull/140626
Approved by: https://github.com/ezyang
committed by PyTorch MergeBot
parent 3ce75e7ea6
commit d32eac86f3
@@ -1632,7 +1632,11 @@ def compile_fx(
     def bw_compiler(
         model: GraphModule, example_inputs: List[InputType]
     ) -> Union[CompiledFxGraph, str]:
-        with dynamo_utils.dynamo_timed("compile_fx.<locals>.bw_compiler"):
+        from torch._dynamo.convert_frame import compile_lock
+
+        with dynamo_utils.dynamo_timed(
+            "compile_fx.<locals>.bw_compiler"
+        ), compile_lock:
             model_outputs_node = output_node(model)
             if config.bw_outputs_user_visible:
                 model_outputs = pytree.arg_tree_leaves(*model_outputs_node.args)
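For context, the change makes the backward compiler body open the timing context manager and the global Dynamo compile lock together, so a backward graph that is compiled lazily (possibly from the autograd engine rather than the thread that compiled the forward) cannot race other compilations over shared compiler state. The sketch below is a minimal, standalone illustration of that locking pattern, not PyTorch's actual internals: `COMPILE_LOCK`, `_compiled_graphs`, and `fake_compile` are made-up stand-ins for `compile_lock` and `bw_compiler` from the diff above.

```
# Minimal sketch of guarding a lazily-triggered compile with a global lock.
# All names here are illustrative; only the pattern mirrors the diff above.
import threading
import time

COMPILE_LOCK = threading.RLock()  # stand-in for torch._dynamo.convert_frame.compile_lock
_compiled_graphs = {}             # shared compiler state that must not be mutated concurrently


def fake_compile(graph_id: int) -> str:
    """Pretend backward compiler: reads and writes shared state under the lock."""
    with COMPILE_LOCK:  # same shape as `with dynamo_timed(...), compile_lock:`
        if graph_id in _compiled_graphs:
            return _compiled_graphs[graph_id]
        time.sleep(0.01)  # simulate compilation work
        _compiled_graphs[graph_id] = f"compiled-{graph_id}"
        return _compiled_graphs[graph_id]


if __name__ == "__main__":
    # Concurrent "backward" compiles (as an autograd thread might trigger)
    # serialize on the lock instead of racing on _compiled_graphs.
    threads = [threading.Thread(target=fake_compile, args=(i % 2,)) for i in range(8)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    print(sorted(_compiled_graphs.values()))
```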