Revert "Add torch compile force disable caches alias (#158072)"
This reverts commit 2ecf083b7247f265a03ec296ba9d7b795f035118. Reverted https://github.com/pytorch/pytorch/pull/158072 on behalf of https://github.com/jeffdaily due to fails on rocm, signal ignored while rocm was unstable ([comment](https://github.com/pytorch/pytorch/pull/158072#issuecomment-3086740829))
@@ -717,5 +717,5 @@ backtrace is slow and very spammy so it is not included by default with extended
 
 In order to measure the cold start compilation time or debug a cache corruption,
 it is possible pass `TORCHINDUCTOR_FORCE_DISABLE_CACHES=1` or set
-`torch.compiler.config.force_disable_caches = True` which will override any
+`torch._inductor.config.force_disable_caches = True` which will override any
 other caching config option and disable all compile time caching.
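As a usage illustration of the doc text above (a minimal sketch, not part of the diff): the function `f`, the tensor shape, and the timing scaffolding are made up for the example, while the environment variable and config flag are the ones named in the restored doc line.

```python
import os
import time

# Option 1: set the env var before torch is imported so inductor picks it up.
os.environ["TORCHINDUCTOR_FORCE_DISABLE_CACHES"] = "1"

import torch

# Option 2: flip the config flag directly; per the doc text it overrides
# any other caching option and disables all compile-time caching.
torch._inductor.config.force_disable_caches = True


def f(x):
    return torch.sin(x) + torch.cos(x)


compiled = torch.compile(f)
t0 = time.perf_counter()
compiled(torch.randn(8))  # first call does a full, uncached compile
print(f"cold-start compile + first run: {time.perf_counter() - t0:.2f}s")
```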
@@ -521,9 +521,9 @@ def process_automatic_dynamic(
 
 def get_cache_key() -> Optional[str]:
     # TODO: info versions of these logs that log only once
-    if torch.compiler.config.force_disable_caches:
+    if torch._inductor.config.force_disable_caches:
         warn_once(
-            "dynamo_pgo force disabled by torch.compiler.config.force_disable_caches"
+            "dynamo_pgo force disabled by torch._inductor.config.force_disable_caches"
         )
         return None
 
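For readers unfamiliar with the PGO gating above, here is a self-contained sketch of the same "warn once, then skip the cache" pattern. The `warn_once` helper and the fallback key are stand-ins (the real helper and key derivation live elsewhere in the PGO module and are not shown in this hunk).

```python
import functools
import warnings
from typing import Optional

import torch


@functools.lru_cache(None)
def warn_once(msg: str) -> None:
    # Caching on the message makes repeated warnings a no-op; this only
    # approximates the warn_once helper referenced in the diff.
    warnings.warn(msg)


def get_cache_key_sketch() -> Optional[str]:
    if torch._inductor.config.force_disable_caches:
        warn_once(
            "dynamo_pgo force disabled by torch._inductor.config.force_disable_caches"
        )
        return None
    # Hypothetical fallback; the real function derives the key from the compile job.
    return "local-cache-key"
```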
@@ -566,7 +566,7 @@ def code_state_path(cache_key: str) -> Optional[str]:
 
 
 def should_use_remote_dynamo_pgo_cache() -> bool:
-    if torch.compiler.config.force_disable_caches:
+    if torch._inductor.config.force_disable_caches:
         return False
 
     if (r := torch._dynamo.config.automatic_dynamic_remote_pgo) is not None:
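The remote-PGO check above is a short precedence chain. The sketch below spells it out, with the final internal (JustKnobs-based) lookup replaced by a plain `False`, which is what OSS builds effectively default to.

```python
import torch


def should_use_remote_dynamo_pgo_cache_sketch() -> bool:
    # 1. The force-disable flag beats everything else.
    if torch._inductor.config.force_disable_caches:
        return False
    # 2. An explicit user setting, if any, is honored next.
    if (r := torch._dynamo.config.automatic_dynamic_remote_pgo) is not None:
        return r
    # 3. Otherwise the real code consults an internal JustKnobs default;
    #    stand in with False here.
    return False
```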
@@ -95,7 +95,7 @@ class FXGraphCacheMiss(BypassAOTAutogradCache):
 
 
 def should_use_remote_autograd_cache():
-    if torch.compiler.config.force_disable_caches:
+    if torch._inductor.config.force_disable_caches:
         return False
     if config.enable_remote_autograd_cache is not None:
         return config.enable_remote_autograd_cache
@@ -116,7 +116,7 @@ def should_use_remote_autograd_cache():
 
 
 def should_use_local_autograd_cache():
-    if torch.compiler.config.force_disable_caches:
+    if torch._inductor.config.force_disable_caches:
         return False
     return config.enable_autograd_cache
 
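Both autograd-cache predicates follow the same shape: bail out when caching is force-disabled, then defer to the AOTAutograd config flags. A condensed sketch, assuming those flags live in `torch._functorch.config` (the `config` module imported by the cache code) and with the internal remote-cache default stubbed out as `False`:

```python
import torch
from torch._functorch import config as aot_config


def should_use_local_autograd_cache_sketch() -> bool:
    if torch._inductor.config.force_disable_caches:
        return False
    return aot_config.enable_autograd_cache


def should_use_remote_autograd_cache_sketch() -> bool:
    if torch._inductor.config.force_disable_caches:
        return False
    if aot_config.enable_remote_autograd_cache is not None:
        return aot_config.enable_remote_autograd_cache
    # The real function falls back to an internal default here; use False.
    return False
```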
@@ -138,8 +138,12 @@ autotune_remote_cache: Optional[bool] = autotune_remote_cache_default()
 # None: Not set -- Off for OSS, JustKnobs based for internal
 bundled_autotune_remote_cache: Optional[bool] = bundled_autotune_remote_cache_default()
 
-# See torch.compiler.force_disable_caches
-force_disable_caches: bool = Config(alias="torch.compiler.config.force_disable_caches")
+# Force disabled all inductor level caching -- This will override any other caching flag
+force_disable_caches: bool = Config(
+    justknob="pytorch/remote_cache:force_disable_caches",
+    env_name_force="TORCHINDUCTOR_FORCE_DISABLE_CACHES",
+    default=False,
+)
 
 # Unsafe way to skip dynamic shape guards to get faster cache load
 unsafe_skip_cache_dynamic_shape_guards: bool = False
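A quick way to see the restored `env_name_force` hook in action (a sketch; the printed value assumes no other override is active in the process):

```python
import os

# The env var is read when torch._inductor.config is imported, so set it first.
os.environ["TORCHINDUCTOR_FORCE_DISABLE_CACHES"] = "1"

import torch

print(torch._inductor.config.force_disable_caches)  # -> True, forced by the env var
```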
@@ -66,18 +66,6 @@ Tag to be included in the cache key generation for all torch compile caching.
 A common use case for such a tag is to break caches.
 """
 
-force_disable_caches: bool = Config(
-    justknob="pytorch/remote_cache:force_disable_caches",
-    env_name_force=[
-        "TORCHINDUCTOR_FORCE_DISABLE_CACHES",
-        "TORCH_COMPILE_FORCE_DISABLE_CACHES",
-    ],
-    default=False,
-)
-"""
-Force disables all caching -- This will take precedence over and override any other caching flag
-"""
-
 dynamic_sources: str = Config(
     env_name_default="TORCH_COMPILE_DYNAMIC_SOURCES", default=""
 )
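In practical terms, the revert removes the `torch.compiler.config` spelling of the flag, so code that wants to force-disable caching goes back to the inductor namespace. A minimal before/after sketch (the commented-out line is the spelling deleted above):

```python
import torch

# Pre-revert spelling (removed by this commit); attempting it now raises an
# AttributeError because the alias no longer exists:
# torch.compiler.config.force_disable_caches = True

# Post-revert spelling:
torch._inductor.config.force_disable_caches = True
```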