mirror of
https://github.com/pytorch/pytorch.git
synced 2025-10-20 21:14:14 +08:00
Revert "Recheck Autotune cache on Precompile serialization to prune compilation results (#158656)"
This reverts commit 664005662ad8c9aa1942015397048aa9ca14fd6d. Reverted https://github.com/pytorch/pytorch/pull/158656 on behalf of https://github.com/seemethere due to failing internal tests, see D80486843 ([comment](https://github.com/pytorch/pytorch/pull/158656#issuecomment-3201491561))
This commit is contained in:
@ -169,16 +169,7 @@ class PrecompileContext(CacheArtifactManager):
        by artifact type. This function transfers artifacts from _new_cache_artifacts_by_key to _new_cache_artifacts
        """
        for artifact in cls._new_cache_artifacts_by_key.values():
            from torch._functorch._aot_autograd.autograd_cache import (
                BundledAOTAutogradCacheEntry,
            )

            if isinstance(artifact, EditablePrecompileCacheArtifact):
                if isinstance(artifact.content, BundledAOTAutogradCacheEntry):
                    # BundledAOTAutogradCacheEntries should update their autotune results
                    artifact.edit_contents(
                        BundledAOTAutogradCacheEntry.update_autotune_results
                    )
                artifact = artifact.real_encode()
            cls._new_cache_artifacts[artifact.__class__.type()].append(artifact)
        cls._new_cache_artifacts_by_key.clear()
@ -204,15 +195,6 @@ class PrecompileContext(CacheArtifactManager):
        """
        result = cls._new_cache_artifacts_by_key.get(key, None)
        if isinstance(result, EditablePrecompileCacheArtifact):
            from torch._functorch._aot_autograd.autograd_cache import (
                BundledAOTAutogradCacheEntry,
            )

            if isinstance(result.content, BundledAOTAutogradCacheEntry):
                # BundledAOTAutogradCacheEntries should update their autotune results
                result.edit_contents(
                    BundledAOTAutogradCacheEntry.update_autotune_results
                )
            result = result.real_encode()
        return result
Reference in New Issue
Block a user