Mirror of https://github.com/vllm-project/vllm.git (synced 2025-10-20 14:53:52 +08:00)
[Misc] Suppress duplicated logging regarding multimodal input pipeline (#10530)
Signed-off-by: Roger Wang <ywang@roblox.com>
vllm/inputs/preprocess.py
@@ -10,7 +10,7 @@ from vllm.multimodal import MULTIMODAL_REGISTRY, MultiModalRegistry
 from vllm.multimodal.processing import MultiModalDataDict, MultiModalInputsV2
 from vllm.prompt_adapter.request import PromptAdapterRequest
 from vllm.transformers_utils.tokenizer_group import BaseTokenizerGroup
-from vllm.utils import print_warning_once
+from vllm.utils import print_info_once, print_warning_once

 from .data import (DecoderOnlyInputs, EncoderDecoderInputs, ProcessorInputs,
                    PromptType, SingletonInputs, SingletonPrompt, token_inputs)
@@ -212,7 +212,7 @@ class InputPreprocessor:
         # updated to use the new multi-modal processor
         can_process_multimodal = self.mm_registry.has_processor(model_config)
         if not can_process_multimodal:
-            logger.info(
+            print_info_once(
                 "Your model uses the legacy input pipeline instead of the new "
                 "multi-modal processor. Please note that the legacy pipeline "
                 "will be removed in a future release. For more details, see: "
vllm/utils.py
@@ -705,6 +705,12 @@ def create_kv_caches_with_random(
     return key_caches, value_caches


+@lru_cache
+def print_info_once(msg: str) -> None:
+    # Set the stacklevel to 2 to print the caller's line info
+    logger.info(msg, stacklevel=2)
+
+
 @lru_cache
 def print_warning_once(msg: str) -> None:
     # Set the stacklevel to 2 to print the caller's line info
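Why this suppresses the duplicated logging: functools.lru_cache memoizes the helper on its msg argument, so the first call with a given message runs the body and emits the log line, while every repeat of the same string is answered from the cache and never re-enters the function. stacklevel=2 tells the logging module to attribute the record to the helper's caller rather than to the helper itself. Below is a minimal, self-contained sketch of the pattern; the two helpers mirror the diff, but the logger configuration is an assumption for demonstration, not vLLM's actual logging setup.

# Minimal sketch of the print-once pattern this commit introduces.
# The logging setup below is assumed for demonstration only.
import logging
from functools import lru_cache

logging.basicConfig(
    level=logging.INFO,
    format="%(levelname)s %(filename)s:%(lineno)d %(message)s",
)
logger = logging.getLogger("demo")


@lru_cache
def print_info_once(msg: str) -> None:
    # Set the stacklevel to 2 to print the caller's line info
    logger.info(msg, stacklevel=2)


@lru_cache
def print_warning_once(msg: str) -> None:
    # Set the stacklevel to 2 to print the caller's line info
    logger.warning(msg, stacklevel=2)


for _ in range(3):
    # Logged exactly once: later iterations hit the lru_cache entry for
    # this exact string, so the function body (and the log call) is skipped.
    print_info_once("Your model uses the legacy input pipeline ...")

Note that a bare @lru_cache keeps the default maxsize of 128, so in principle a message could be logged again after 128 other distinct messages evict it; for a handful of one-off deprecation notices this is a non-issue.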