[Bugfix] Add image placeholder for OpenAI Compatible Server of MiniCPM-V (#6787)
Co-authored-by: hezhihui <hzh7269@modelbest.cn>
Co-authored-by: Cyrus Leung <cyrus.tl.leung@gmail.com>
@@ -4,6 +4,8 @@ from vllm import LLM, SamplingParams
+from vllm.assets.image import ImageAsset
+
 # 2.0
 # The official repo doesn't work yet, so we need to use a fork for now
 # For more details, please see: https://github.com/vllm-project/vllm/pull/4087#issuecomment-2250397630
 # MODEL_NAME = "HwwwH/MiniCPM-V-2"
 # 2.5
 MODEL_NAME = "openbmb/MiniCPM-Llama3-V-2_5"
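For context, a minimal sketch of how this example is meant to run offline with the MiniCPM-V 2.5 checkpoint above. The asset name ("stop_sign"), the question text, and the sampling settings are illustrative assumptions, not part of the commit.

# Offline-inference sketch; asset name, question, and sampling
# settings are illustrative.
from transformers import AutoTokenizer

from vllm import LLM, SamplingParams
from vllm.assets.image import ImageAsset

MODEL_NAME = "openbmb/MiniCPM-Llama3-V-2_5"

image = ImageAsset("stop_sign").pil_image
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, trust_remote_code=True)
llm = LLM(model=MODEL_NAME, trust_remote_code=True)

# MiniCPM-V expects its image placeholder "(<image>./</image>)" ahead of
# the user text; the tokenizer's chat template wraps the message for the model.
messages = [{
    "role": "user",
    "content": "(<image>./</image>)\nWhat is shown in this image?",
}]
prompt = tokenizer.apply_chat_template(messages,
                                       tokenize=False,
                                       add_generation_prompt=True)

outputs = llm.generate(
    {"prompt": prompt, "multi_modal_data": {"image": image}},
    SamplingParams(temperature=0.0, max_tokens=64),
)
print(outputs[0].outputs[0].text)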
@@ -100,7 +100,9 @@ def _image_token_str(model_config: ModelConfig,
     if model_type == "phi3_v":
         # Workaround since this token is not defined in the tokenizer
         return "<|image_1|>"
-    if model_type in ("blip-2", "chatglm", "fuyu", "minicpmv", "paligemma"):
+    if model_type == "minicpmv":
+        return "(<image>./</image>)"
+    if model_type in ("blip-2", "chatglm", "fuyu", "paligemma"):
         # These models do not use image tokens in the prompt
         return None
     if model_type.startswith("llava"):
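Since the bugfix targets the OpenAI-compatible server, here is a hedged sketch of a request that exercises the new code path. The base URL, API key, and image URL are placeholders; it assumes the server was started with the model above and --trust-remote-code.

# Client-side sketch against the OpenAI-compatible server. With this
# change, the server resolves MiniCPM-V's "(<image>./</image>)"
# placeholder into the prompt instead of treating the model as one
# that takes no image token.
from openai import OpenAI

client = OpenAI(base_url="http://localhost:8000/v1", api_key="EMPTY")

response = client.chat.completions.create(
    model="openbmb/MiniCPM-Llama3-V-2_5",
    messages=[{
        "role": "user",
        "content": [
            {"type": "text", "text": "What is in this image?"},
            {"type": "image_url",
             "image_url": {"url": "https://example.com/stop_sign.jpg"}},
        ],
    }],
)
print(response.choices[0].message.content)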