[V1] Enable Inductor when using piecewise CUDA graphs (#10268)
Signed-off-by: Woosuk Kwon <woosuk.kwon@berkeley.edu>
@@ -404,14 +404,17 @@ class GPUModelRunner:
 
     def load_model(self) -> None:
         if self.use_cuda_graph:
-            # FIXME(woosuk): Currently, we do not use inductor to reduce the
-            # compilation time and any potential issues with the inductor.
-            os.environ["VLLM_CUSTOM_OPS"] = "all"
+            # NOTE(woosuk): Currently, we use inductor because the piecewise
+            # CUDA graphs do not work properly with the custom CUDA kernels.
+            # FIXME(woosuk): Disable inductor to reduce the compilation time
+            # and avoid any potential issues with the inductor.
+            os.environ["VLLM_CUSTOM_OPS"] = "none"
             set_compilation_config(
                 CompilationConfig(
                     use_cudagraph=True,
                     non_cudagraph_ops=["vllm.unified_v1_flash_attention"],
-                    use_inductor=False,
+                    use_inductor=True,
                     enable_fusion=False,
                 ))
 
         logger.info("Starting to load model %s...", self.model_config.model)