[Misc] refactor code by import as for torch._inductor.config (#23677)

Signed-off-by: Andy Xie <andy.xning@gmail.com>
Author: Ning Xie
Date: 2025-09-01 14:05:42 +08:00
Committed by: GitHub
Parent: ff0e59d83a
Commit: 499b074bfd

@@ -138,14 +138,14 @@ def _torch_cuda_wrapper():
 @contextmanager
 def _set_global_compilation_settings(config: VllmConfig):
-    import torch._inductor.config
+    import torch._inductor.config as torch_inductor_config
     inductor_config = config.compilation_config.inductor_compile_config
     # Note: The MKLDNN and CPPGEMM backend requires freezing parameters.
-    freezing_value = torch._inductor.config.freezing
+    freezing_value = torch_inductor_config.freezing
     try:
         if inductor_config.get("max_autotune", False):
-            torch._inductor.config.freezing = True
+            torch_inductor_config.freezing = True
         yield
     finally:
-        torch._inductor.config.freezing = freezing_value
+        torch_inductor_config.freezing = freezing_value
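
For context, below is a minimal standalone sketch of the save/override/restore pattern this hunk touches, written against the new torch_inductor_config alias. The helper name temporarily_enable_freezing and the commented usage are illustrative only and are not part of vLLM; the only assumption carried over from the diff is that torch._inductor.config exposes a freezing flag.

from contextlib import contextmanager

# Bind the submodule to a short alias, as the refactor above does.
import torch._inductor.config as torch_inductor_config


@contextmanager
def temporarily_enable_freezing(enable: bool = True):
    # Hypothetical helper for illustration: save the current freezing flag,
    # optionally force it on, and always restore the saved value on exit.
    previous = torch_inductor_config.freezing
    try:
        if enable:
            torch_inductor_config.freezing = True
        yield
    finally:
        torch_inductor_config.freezing = previous


# Example usage: freezing is only enabled inside the with-block.
# with temporarily_enable_freezing():
#     compiled = torch.compile(model)  # hypothetical call site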