Migrate from lru_cache to cache (#155613)

Pull Request resolved: https://github.com/pytorch/pytorch/pull/155613
Approved by: https://github.com/ezyang
ghstack dependencies: #155612
This commit is contained in:
Oguz Ulgen
2025-06-11 08:59:26 -07:00
committed by PyTorch MergeBot
parent f80a61adf5
commit d1947a8707
70 changed files with 157 additions and 157 deletions

View File

@@ -1124,7 +1124,7 @@ class TritonTemplateKernel(TritonKernel):
]
@functools.lru_cache(None)
@functools.cache
def _jinja2_env():
try:
import jinja2
@@ -1726,7 +1726,7 @@ class ExternKernelChoice:
def call_name(self):
return f"extern_kernels.{self.name}"
@functools.lru_cache(None) # noqa: B019
@functools.cache # noqa: B019
def hash_key(self):
fn = self.to_callable()
parts = [
@@ -1933,7 +1933,7 @@ class ExternKernelCaller(ChoiceCaller):
return f"extern_{self.choice.name}"
@functools.lru_cache(None)
@functools.cache
def get_mm_log_filename() -> Optional[str]:
mm_file_name = os.environ.get("TORCHINDUCTOR_MM_LOGGING_FILE", None)
if not mm_file_name:
@@ -2052,7 +2052,7 @@ class NoValidChoicesError(RuntimeError):
pass
@functools.lru_cache(None)
@functools.cache
def get_num_workers() -> int:
if "TORCHINDUCTOR_COMPILE_THREADS" in os.environ:
return int(os.environ["TORCHINDUCTOR_COMPILE_THREADS"])
@@ -2194,7 +2194,7 @@ class AlgorithmSelectorCache(PersistentCache):
# CUDATemplateCaller still needs to go through autotuning process to retrieve workspace size.
return choices[0].output_node()
@functools.lru_cache(None)
@functools.cache
def make_benchmark_fn():
return self.make_benchmark_fn(choices, input_nodes, layout, input_gen_fns)
@@ -2506,7 +2506,7 @@ class AlgorithmSelectorCache(PersistentCache):
future.add_done_callback(on_complete)
futures[future] = c
@functools.lru_cache(None)
@functools.cache
@restore_stdout_stderr()
def wait_on_futures():
log.debug("Waiting on futures")