[Bugfix] fix flashinfer cudagraph capture for PP (#6708)

William Lin
2024-07-23 18:49:44 -07:00
committed by GitHub
parent 87525fab92
commit 5e8ca973eb
2 changed files with 31 additions and 7 deletions
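In short: the FlashInfer CUDA graph capture path sliced the shared indptr_buffer and last_page_len_buffer in place inside the per-batch-size capture loop. With pipeline parallelism that loop runs once per virtual engine, so a later pass started from buffers already truncated to the smallest batch size of the previous pass. The fix below binds the per-batch-size slices to new names (_indptr_buffer, _last_page_len_buffer) so the full buffers survive across iterations. A minimal sketch of the pattern, using plain tensors and illustrative sizes rather than the real model-runner state:

import torch

max_batch_size = 8
pipeline_parallel_size = 2
batch_size_capture_list = [1, 2, 4, 8]

# Shared buffers allocated once, sized for the largest capture batch size.
indptr_buffer = torch.zeros(max_batch_size + 1, dtype=torch.int32)
last_page_len_buffer = torch.zeros(max_batch_size, dtype=torch.int32)

for virtual_engine in range(pipeline_parallel_size):
    for batch_size in reversed(batch_size_capture_list):
        # Buggy pattern: rebinding the original names shrinks the shared
        # buffers, so the next virtual engine starts from views that were
        # already cut down to the smallest batch size of the previous pass.
        #   indptr_buffer = indptr_buffer[:batch_size + 1]
        #   last_page_len_buffer = last_page_len_buffer[:batch_size]

        # Fixed pattern: slice into fresh names; the originals stay intact.
        _indptr_buffer = indptr_buffer[:batch_size + 1]
        _last_page_len_buffer = last_page_len_buffer[:batch_size]

# The shared buffers keep their full size for every capture pass.
assert indptr_buffer.numel() == max_batch_size + 1
assert last_page_len_buffer.numel() == max_batch_size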

View File

@@ -61,3 +61,27 @@ def test_compare_tp(TP_SIZE, PP_SIZE, EAGER_MODE, CHUNKED_PREFILL, MODEL_NAME,
        tp_args.append("--enforce-eager")
    compare_two_settings(MODEL_NAME, pp_args, tp_args)
+
+
+@pytest.mark.parametrize("PP_SIZE, MODEL_NAME", [
+    (2, "JackFram/llama-160m"),
+])
+@pytest.mark.parametrize("ATTN_BACKEND", [
+    "FLASH_ATTN",
+    "FLASHINFER",
+])
+def test_pp_cudagraph(PP_SIZE, MODEL_NAME, ATTN_BACKEND):
+    cudagraph_args = [
+        # use half precision for speed and memory savings in CI environment
+        "--dtype",
+        "float16",
+        "--pipeline-parallel-size",
+        str(PP_SIZE),
+        "--distributed-executor-backend",
+        "ray",
+    ]
+    os.environ["VLLM_ATTENTION_BACKEND"] = ATTN_BACKEND
+
+    eager_args = cudagraph_args + ["--enforce-eager"]
+
+    compare_two_settings(MODEL_NAME, eager_args, cudagraph_args)
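The new test exercises both attention backends under pipeline parallelism: it launches the same configuration twice, once with --enforce-eager and once with CUDA graphs enabled, and compare_two_settings checks that the two runs produce matching results. A sketch of how one might invoke just this test locally; the module path is an assumption based on the surrounding hunk, and at least two GPUs plus Ray are required:

import sys

import pytest

if __name__ == "__main__":
    # Path assumed from the hunk context (test_compare_tp lives in the same
    # pipeline-parallel test module); adjust it to the actual file location.
    sys.exit(pytest.main([
        "tests/distributed/test_pipeline_parallel.py",
        "-k", "test_pp_cudagraph",
        "-v",
    ]))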

View File

@@ -1040,9 +1040,9 @@ class GPUModelRunnerBase(ModelRunnerBase[TModelInputForGPU]):
self.parallel_config.pipeline_parallel_size):
for batch_size in reversed(batch_size_capture_list):
if self.attn_backend.get_name() == "flashinfer":
-indptr_buffer = indptr_buffer[:batch_size + 1]
-last_page_len_buffer = last_page_len_buffer[:batch_size]
+_indptr_buffer = indptr_buffer[:batch_size + 1]
+_last_page_len_buffer = last_page_len_buffer[:batch_size]
num_qo_heads = (
self.model_config.get_num_attention_heads(
@@ -1055,8 +1055,8 @@ class GPUModelRunnerBase(ModelRunnerBase[TModelInputForGPU]):
use_tensor_cores = False
decode_wrapper = \
CUDAGraphBatchDecodeWithPagedKVCacheWrapper(
-decode_workspace_buffer, indptr_buffer,
-indices_buffer, last_page_len_buffer, "NHD",
+decode_workspace_buffer, _indptr_buffer,
+indices_buffer, _last_page_len_buffer, "NHD",
use_tensor_cores)
kv_cache_dtype = get_kv_cache_torch_dtype(
self.kv_cache_dtype, self.model_config.dtype)
@@ -1131,10 +1131,10 @@ class GPUModelRunnerBase(ModelRunnerBase[TModelInputForGPU]):
self.model, self.attn_backend.get_name())
if self.attn_backend.get_name() == "flashinfer":
-graph_runner.flashinfer_indptr_buffer = indptr_buffer
+graph_runner.flashinfer_indptr_buffer = _indptr_buffer
graph_runner.flashinfer_indices_buffer = indices_buffer
graph_runner.flashinfer_last_page_len_buffer = \
-last_page_len_buffer
+_last_page_len_buffer
graph_runner.flashinfer_decode_workspace_buffer = \
decode_workspace_buffer
graph_runner.flashinfer_decode_wrapper = \