[v1] fix use compile sizes (#11000)

Signed-off-by: youkaichao <youkaichao@gmail.com>
Author: youkaichao
Date: 2024-12-09 13:47:24 -08:00
Committed by: GitHub
Parent: cbcbdb1ceb
Commit: 1a2f8fb828
2 changed files with 4 additions and 0 deletions


@@ -2522,6 +2522,7 @@ class VllmConfig:
             self.compilation_config.custom_ops = ["none"]
             self.compilation_config.use_cudagraph = True
             self.compilation_config.use_inductor = True
+            self.compilation_config.cudagraph_num_of_warmups = 1
             self.compilation_config.pass_config.enable_fusion = False
             self.compilation_config.pass_config.enable_reshape = False
             self.compilation_config.level = CompilationLevel.PIECEWISE


@@ -582,6 +582,9 @@ class GPUModelRunner:
         # can reuse the memory pool allocated for the large shapes.
         with graph_capture():
             for num_tokens in reversed(self.cudagraph_batch_sizes):
+                for _ in range(self.vllm_config.compilation_config.
+                               cudagraph_num_of_warmups):
+                    self._dummy_run(self.model, num_tokens, self.kv_caches)
                 self._dummy_run(self.model, num_tokens, self.kv_caches)

         end_time = time.perf_counter()
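
For context, not part of the diff: the warm-up iterations added here follow the usual PyTorch guidance of running a model a few times before capturing it into a CUDA graph, so that one-time work (kernel autotuning, workspace and caching-allocator growth) is not recorded into the graph. Below is a minimal, self-contained sketch of that generic pattern in plain PyTorch; vLLM's graph_capture() and _dummy_run() perform the analogous steps internally, and the names used here (model, static_input, num_warmups) are illustrative, not vLLM APIs.

import torch

# Generic warm-up-then-capture sketch (assumes a CUDA device is available).
model = torch.nn.Linear(1024, 1024).cuda()
static_input = torch.randn(8, 1024, device="cuda")

# Warm up on a side stream so lazy initialization happens before capture;
# num_warmups plays the role of cudagraph_num_of_warmups above.
num_warmups = 1
side_stream = torch.cuda.Stream()
side_stream.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(side_stream):
    for _ in range(num_warmups):
        model(static_input)
torch.cuda.current_stream().wait_stream(side_stream)

# Capture one forward pass; later replays re-run the recorded kernels
# against the same static buffers without per-kernel launch overhead.
graph = torch.cuda.CUDAGraph()
with torch.cuda.graph(graph):
    static_output = model(static_input)

# To run a new batch, copy it into the static input and replay the graph.
static_input.copy_(torch.randn(8, 1024, device="cuda"))
graph.replay()
result = static_output.clone()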