[Bugfix] Limit profiling run sequence length by max_model_len (#14785)

Author: Kyle Sayers <kylesayrs@gmail.com>
Date:   2025-03-16 10:44:19 -04:00
Committed by: GitHub
Signed-off-by: Kyle Sayers <kylesayrs@gmail.com>
Parent: d1ad2a57af
Commit: d30aa7e9e6

5 files changed, 9 insertions(+), 0 deletions(-)
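
Why the fix is needed: the profiling run builds a dummy batch by splitting max_num_batched_tokens as evenly as possible across max_num_seqs sequences. When the token budget is large relative to the sequence count, a single profiling sequence can come out longer than the model's maximum context length, so profiling measures shapes that can never occur in serving. The hunks below clamp every profiling sequence length to max_model_len. A minimal standalone sketch of the failure mode and the fix, with hypothetical configuration values (not taken from any real config):

# Hypothetical values for illustration only.
max_num_batched_tokens = 8192   # scheduler token budget per batch
max_num_seqs = 2                # sequences packed into the dummy batch
max_model_len = 2048            # model's maximum context length

for group_id in range(max_num_seqs):
    # Spread the budget evenly; early groups absorb the remainder.
    seq_len = (max_num_batched_tokens // max_num_seqs +
               (group_id < max_num_batched_tokens % max_num_seqs))
    assert seq_len == 4096      # before the fix: exceeds max_model_len
    seq_len = min(seq_len, max_model_len)   # the one-line fix
    assert seq_len == 2048      # clamped to a length the model supports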

@@ -330,6 +330,11 @@ class InputRegistry:
         from vllm.multimodal import MultiModalKwargs
         from vllm.multimodal.profiling import MultiModalProfiler
 
+        if seq_len > model_config.max_model_len:
+            raise AssertionError(
+                f"Profiling attempted with sequence length ({seq_len}) "
+                f"greater than model length ({model_config.max_model_len})")
+
         if mm_registry.has_processor(model_config):
             tokenizer = cached_tokenizer_from_config(model_config)
             processor = mm_registry.create_processor(model_config,
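
In the input registry this lands as a fail-fast guard rather than a silent clamp: a caller that requests an over-long profiling length gets an immediate, descriptive error. A standalone illustration of the guard (the values are hypothetical; in the real code they come from the caller and from model_config):

seq_len, max_model_len = 4096, 2048
if seq_len > max_model_len:
    raise AssertionError(
        f"Profiling attempted with sequence length ({seq_len}) "
        f"greater than model length ({max_model_len})")
# -> AssertionError: Profiling attempted with sequence length (4096)
#    greater than model length (2048)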

@@ -281,6 +281,7 @@ class EncoderDecoderModelRunner(GPUModelRunnerBase[EncoderDecoderModelInput]):
         for group_id in range(max_num_seqs):
             seq_len = (max_num_batched_tokens // max_num_seqs +
                        (group_id < max_num_batched_tokens % max_num_seqs))
+            seq_len = min(seq_len, self.model_config.max_model_len)
             batch_size += seq_len
 
             decoder_dummy_data = self.input_registry \
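
The length formula here (shared verbatim with the GPU and XPU hunks below) leans on Python's bool-to-int coercion: group_id < remainder contributes 1 for the first remainder groups, so the lengths sum exactly to the budget. A worked example with made-up numbers:

max_num_batched_tokens, max_num_seqs = 10, 3
lens = [max_num_batched_tokens // max_num_seqs +
        (g < max_num_batched_tokens % max_num_seqs)
        for g in range(max_num_seqs)]
assert lens == [4, 3, 3]                   # the remainder goes to group 0
assert sum(lens) == max_num_batched_tokens

After the new clamp, that sum (and hence batch_size) can fall below max_num_batched_tokens; that is the point of the fix, since profiling sequences longer than max_model_len would measure memory for shapes the model cannot run.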

@@ -1302,6 +1302,7 @@ class GPUModelRunnerBase(ModelRunnerBase[TModelInputForGPU]):
         for group_id in range(max_num_seqs):
             seq_len = (max_num_batched_tokens // max_num_seqs +
                        (group_id < max_num_batched_tokens % max_num_seqs))
+            seq_len = min(seq_len, self.model_config.max_model_len)
             batch_size += seq_len
 
             dummy_data = self.input_registry \

@@ -148,6 +148,7 @@ class OpenVINOModelRunner(ModelRunnerBase):
             seq_len = min(
                 seq_data.get_len(),
                 computed_len + seq_group_metadata.token_chunk_size,
+                self.model_config.max_model_len,
             )
             if is_prompt:
                 tokens = seq_data.get_token_ids()[computed_len:seq_len]
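
In the OpenVINO runner the cap composes with an existing bound: min() accepts any number of arguments, so max_model_len simply joins the total sequence length and the chunk boundary as a third candidate. A sketch with hypothetical stand-in values:

seq_total_len = 4096     # stands in for seq_data.get_len()
computed_len = 0         # tokens already processed
token_chunk_size = 4096  # stands in for seq_group_metadata.token_chunk_size
max_model_len = 2048
seq_len = min(seq_total_len, computed_len + token_chunk_size, max_model_len)
assert seq_len == 2048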

@@ -466,6 +466,7 @@ class XPUModelRunner(ModelRunnerBase[ModelInputForXPUWithSamplingMetadata]):
         for group_id in range(max_num_seqs):
             seq_len = (max_num_batched_tokens // max_num_seqs +
                        (group_id < max_num_batched_tokens % max_num_seqs))
+            seq_len = min(seq_len, self.model_config.max_model_len)
             batch_size += seq_len
 
             dummy_data = self.input_registry \
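
The EncoderDecoderModelRunner, GPUModelRunnerBase, and XPUModelRunner hunks are the same one-line clamp applied to each backend's copy of the dummy-batch loop, which is why conceptually a single fix touches five files with nine added lines.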