Mirror of https://github.com/pytorch/pytorch.git (synced 2025-10-20 21:14:14 +08:00)
[AOTI] Cache treespec_loads calculation (#145815)
Summary: The treespec can be reused instead of being recalculated from its serialized string on every AOTI module call. Using the cached result saves 0.2 ms per module call.

Test Plan:
Before: {F1974751578}
After: {F1974751667}

Differential Revision: D68749539

Pull Request resolved: https://github.com/pytorch/pytorch/pull/145815
Approved by: https://github.com/henrylhtsang
committed by PyTorch MergeBot
parent 57d8278ab9
commit eeb5e1bf20
@@ -930,6 +930,7 @@ def treespec_dumps(treespec: TreeSpec, protocol: Optional[int] = None) -> str:
     return python_pytree.treespec_dumps(orig_treespec, protocol=protocol)
 
 
+@functools.lru_cache
 def treespec_loads(serialized: str) -> TreeSpec:
     """Deserialize a treespec from a JSON string."""
     orig_treespec = python_pytree.treespec_loads(serialized)
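For reference, a minimal sketch of the caching pattern this change applies, using only the standard library. Here json.loads is a hypothetical stand-in for the real treespec parsing, and loads_cached is an illustrative name rather than PyTorch's API; functools.lru_cache memoizes the pure str -> object deserialization, so repeated calls with the same serialized string reuse the previously built result instead of re-parsing it.

import functools
import json
from typing import Any


@functools.lru_cache
def loads_cached(serialized: str) -> Any:
    # Hypothetical stand-in for the expensive parse step that
    # python_pytree.treespec_loads performs on the serialized string.
    return json.loads(serialized)


if __name__ == "__main__":
    spec = '[1, {"keys": ["a", "b"]}]'
    first = loads_cached(spec)
    second = loads_cached(spec)
    # The second call is served from the cache, so both names refer to
    # the exact same object and no re-parsing happens.
    assert first is second
    print(loads_cached.cache_info())  # e.g. hits=1, misses=1

Because lru_cache keys on the argument, this only works when the serialized string is hashable and the deserialized result is safe to share across callers, which the change assumes holds for TreeSpec.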