[Bugfix] Limit profiling run sequence length by max_model_len (#14785)
Signed-off-by: Kyle Sayers <kylesayrs@gmail.com>
@@ -330,6 +330,11 @@ class InputRegistry:
         from vllm.multimodal import MultiModalKwargs
         from vllm.multimodal.profiling import MultiModalProfiler
 
+        if seq_len > model_config.max_model_len:
+            raise AssertionError(
+                f"Profiling attempted with sequence length ({seq_len}) "
+                f"greater than model length ({model_config.max_model_len})")
+
         if mm_registry.has_processor(model_config):
             tokenizer = cached_tokenizer_from_config(model_config)
             processor = mm_registry.create_processor(model_config,
@@ -281,6 +281,7 @@ class EncoderDecoderModelRunner(GPUModelRunnerBase[EncoderDecoderModelInput]):
         for group_id in range(max_num_seqs):
             seq_len = (max_num_batched_tokens // max_num_seqs +
                        (group_id < max_num_batched_tokens % max_num_seqs))
+            seq_len = min(seq_len, self.model_config.max_model_len)
             batch_size += seq_len
 
             decoder_dummy_data = self.input_registry \
@@ -1302,6 +1302,7 @@ class GPUModelRunnerBase(ModelRunnerBase[TModelInputForGPU]):
         for group_id in range(max_num_seqs):
             seq_len = (max_num_batched_tokens // max_num_seqs +
                        (group_id < max_num_batched_tokens % max_num_seqs))
+            seq_len = min(seq_len, self.model_config.max_model_len)
             batch_size += seq_len
 
             dummy_data = self.input_registry \
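The base GPU runner's profiling loop gets the same one-line clamp. A quick numeric illustration of the case it guards against (configuration values are hypothetical): when the per-sequence share of the token budget exceeds `max_model_len`, the unclamped value would trip the new assertion in `InputRegistry`.

```python
# Hypothetical numbers: a large token budget split over few sequences,
# against a model with a comparatively short context length.
max_num_batched_tokens = 32768
max_num_seqs = 8
max_model_len = 2048

unclamped = max_num_batched_tokens // max_num_seqs  # 4096 > 2048
clamped = min(unclamped, max_model_len)              # 2048

assert unclamped > max_model_len   # would trip the new InputRegistry check
assert clamped <= max_model_len    # the runner-side clamp avoids it
print(unclamped, clamped)
```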
@@ -148,6 +148,7 @@ class OpenVINOModelRunner(ModelRunnerBase):
             seq_len = min(
                 seq_data.get_len(),
                 computed_len + seq_group_metadata.token_chunk_size,
+                self.model_config.max_model_len,
             )
             if is_prompt:
                 tokens = seq_data.get_token_ids()[computed_len:seq_len]
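In the OpenVINO runner, the end of the prompt slice being processed is already the minimum of the full sequence length and the end of the current prefill chunk; the fix adds `max_model_len` as a third bound. A simplified sketch of that slicing, where `SeqDataStub` and the sample values are illustrative stand-ins rather than vLLM's actual classes:

```python
class SeqDataStub:
    """Minimal stand-in for vLLM's sequence data; just token storage."""

    def __init__(self, token_ids: list[int]) -> None:
        self._token_ids = token_ids

    def get_len(self) -> int:
        return len(self._token_ids)

    def get_token_ids(self) -> list[int]:
        return self._token_ids


def prompt_chunk(seq_data: SeqDataStub, computed_len: int,
                 token_chunk_size: int, max_model_len: int) -> list[int]:
    # Mirrors the clamped slice: stop at the end of the prompt, the end of
    # the current chunk, or the model's maximum length, whichever comes first.
    seq_len = min(
        seq_data.get_len(),
        computed_len + token_chunk_size,
        max_model_len,
    )
    return seq_data.get_token_ids()[computed_len:seq_len]


data = SeqDataStub(list(range(10)))
print(prompt_chunk(data, computed_len=4, token_chunk_size=8, max_model_len=8))
# -> [4, 5, 6, 7]: the chunk would reach token 12, but max_model_len caps it at 8.
```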
@@ -466,6 +466,7 @@ class XPUModelRunner(ModelRunnerBase[ModelInputForXPUWithSamplingMetadata]):
         for group_id in range(max_num_seqs):
             seq_len = (max_num_batched_tokens // max_num_seqs +
                        (group_id < max_num_batched_tokens % max_num_seqs))
+            seq_len = min(seq_len, self.model_config.max_model_len)
             batch_size += seq_len
 
             dummy_data = self.input_registry \
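The XPU runner receives the identical one-line fix. As a closing illustration, a hypothetical property-style check (not part of the vLLM test suite) that the clamped per-group lengths always respect the model limit for a range of configurations:

```python
from itertools import product


def clamped_seq_len(group_id: int, max_num_batched_tokens: int,
                    max_num_seqs: int, max_model_len: int) -> int:
    # Same formula as the runners above, with the new clamp applied.
    seq_len = (max_num_batched_tokens // max_num_seqs +
               (group_id < max_num_batched_tokens % max_num_seqs))
    return min(seq_len, max_model_len)


for tokens, seqs, model_len in product((1024, 8192, 32768),
                                       (1, 8, 256),
                                       (16, 2048, 131072)):
    lens = [clamped_seq_len(g, tokens, seqs, model_len) for g in range(seqs)]
    assert all(length <= model_len for length in lens)
    assert lens[0] == max(lens)  # remainder tokens always go to the first groups
print("profiling lengths never exceed max_model_len")
```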