Revert "Recheck Autotune cache on Precompile serialization to prune compilation results (#158656)"

This reverts commit 664005662ad8c9aa1942015397048aa9ca14fd6d.

Reverted https://github.com/pytorch/pytorch/pull/158656 on behalf of https://github.com/seemethere due to failing internal tests, see D80486843 ([comment](https://github.com/pytorch/pytorch/pull/158656#issuecomment-3201491561))
commit eddaaa6c2a
Author: PyTorch MergeBot
Date:   2025-08-19 16:53:20 +00:00
Parent: fecc5f6001

7 changed files with 29 additions and 100 deletions

@@ -169,16 +169,7 @@ class PrecompileContext(CacheArtifactManager):
         by artifact type. This function transfers artifacts from _new_cache_artifacts_by_key to _new_cache_artifacts
         """
         for artifact in cls._new_cache_artifacts_by_key.values():
-            from torch._functorch._aot_autograd.autograd_cache import (
-                BundledAOTAutogradCacheEntry,
-            )
-
             if isinstance(artifact, EditablePrecompileCacheArtifact):
-                if isinstance(artifact.content, BundledAOTAutogradCacheEntry):
-                    # BundledAOTAutogradCacheEntries should update their autotune results
-                    artifact.edit_contents(
-                        BundledAOTAutogradCacheEntry.update_autotune_results
-                    )
                 artifact = artifact.real_encode()
             cls._new_cache_artifacts[artifact.__class__.type()].append(artifact)
         cls._new_cache_artifacts_by_key.clear()
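
Note: for readers unfamiliar with the precompile-cache internals, here is a minimal, self-contained sketch of what the transfer loop looks like after this revert. The names (`CacheArtifact`, `EditableArtifact`, the module-level dicts) are simplified stand-ins for illustration, not the actual torch classes; the point is only that editable artifacts are now encoded as-is, with no content editing before they are bucketed by artifact type.

```python
# Standalone sketch of the post-revert transfer loop, using stand-in classes.
from collections import defaultdict


class CacheArtifact:
    """Stand-in for an already-encoded cache artifact."""

    def __init__(self, key: str, payload: bytes) -> None:
        self.key = key
        self.payload = payload

    @classmethod
    def type(cls) -> str:
        return "precompile"


class EditableArtifact:
    """Stand-in for an editable artifact: holds unpickled content until
    serialization time, then encodes it into a plain CacheArtifact."""

    def __init__(self, key: str, content: object) -> None:
        self.key = key
        self.content = content

    def real_encode(self) -> CacheArtifact:
        # Post-revert, the content is encoded directly; no edit pass runs first.
        return CacheArtifact(self.key, repr(self.content).encode())


new_artifacts_by_key = {"fn0": EditableArtifact("fn0", {"entry": 1})}
new_artifacts: dict[str, list[CacheArtifact]] = defaultdict(list)

for artifact in new_artifacts_by_key.values():
    if isinstance(artifact, EditableArtifact):
        artifact = artifact.real_encode()
    # Bucket the encoded artifact by its artifact type.
    new_artifacts[artifact.__class__.type()].append(artifact)
new_artifacts_by_key.clear()
```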
@@ -204,15 +195,6 @@ class PrecompileContext(CacheArtifactManager):
         """
         result = cls._new_cache_artifacts_by_key.get(key, None)
         if isinstance(result, EditablePrecompileCacheArtifact):
-            from torch._functorch._aot_autograd.autograd_cache import (
-                BundledAOTAutogradCacheEntry,
-            )
-
-            if isinstance(result.content, BundledAOTAutogradCacheEntry):
-                # BundledAOTAutogradCacheEntries should update their autotune results
-                result.edit_contents(
-                    BundledAOTAutogradCacheEntry.update_autotune_results
-                )
             result = result.real_encode()
         return result
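
Note: both hunks remove the same pattern, namely rewriting an editable artifact's content before encoding it. The sketch below illustrates that pattern with hypothetical stand-ins (`EditableArtifact.edit_contents`, `prune_stale_autotune_results`); it is not the real `BundledAOTAutogradCacheEntry.update_autotune_results` implementation, only a rough picture of the behavior this revert drops.

```python
# Illustrative sketch of the pre-revert "edit before encode" pattern.
from typing import Callable


class EditableArtifact:
    def __init__(self, key: str, content: dict) -> None:
        self.key = key
        self.content = content

    def edit_contents(self, edit_fn: Callable[[dict], dict]) -> None:
        # Pre-revert, serialization could rewrite the content before encoding,
        # e.g. to prune autotune results that should not be persisted.
        self.content = edit_fn(self.content)

    def real_encode(self) -> bytes:
        return repr(self.content).encode()


def prune_stale_autotune_results(content: dict) -> dict:
    """Hypothetical stand-in for an autotune-result pruning callback."""
    content = dict(content)
    content["autotune_results"] = [
        r for r in content.get("autotune_results", []) if r.get("valid")
    ]
    return content


artifact = EditableArtifact(
    "fn0", {"autotune_results": [{"valid": True}, {"valid": False}]}
)
# Pre-revert behavior: edit the content, then encode it.
# Post-revert, only real_encode() runs and the content is serialized unchanged.
artifact.edit_contents(prune_stale_autotune_results)
encoded = artifact.real_encode()
```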