[Bugfix] Add image placeholder for OpenAI Compatible Server of MiniCPM-V (#6787)

Co-authored-by: hezhihui <hzh7269@modelbest.cn>
Co-authored-by: Cyrus Leung <cyrus.tl.leung@gmail.com>
Alphi committed 2024-07-26 00:42:49 +08:00 (committed by GitHub)
parent 316a41ac1d
commit b75e314fff
2 changed files with 5 additions and 1 deletion

@@ -4,6 +4,8 @@ from vllm import LLM, SamplingParams
 from vllm.assets.image import ImageAsset
 
 # 2.0
+# The official repo doesn't work yet, so we need to use a fork for now
+# For more details, please see: https://github.com/vllm-project/vllm/pull/4087#issuecomment-2250397630
 # MODEL_NAME = "HwwwH/MiniCPM-V-2"
 # 2.5
 MODEL_NAME = "openbmb/MiniCPM-Llama3-V-2_5"
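For context, the example file above can be driven offline roughly as in the sketch below. The sketch is not part of this diff: the asset name ("stop_sign"), the sampling settings, and the bare prompt layout are illustrative assumptions, while the "(<image>./</image>)" placeholder itself is taken from this commit's second hunk.

from vllm import LLM, SamplingParams
from vllm.assets.image import ImageAsset

MODEL_NAME = "openbmb/MiniCPM-Llama3-V-2_5"

# Load a bundled test image; "stop_sign" is an illustrative asset name.
image = ImageAsset("stop_sign").pil_image.convert("RGB")

llm = LLM(model=MODEL_NAME, trust_remote_code=True)

# Offline users must write the MiniCPM-V image placeholder into the
# prompt themselves; the server-side fix in the next hunk does this
# automatically for OpenAI-compatible chat requests.
prompt = "(<image>./</image>)\nWhat is the content of this image?"

outputs = llm.generate(
    {"prompt": prompt, "multi_modal_data": {"image": image}},
    SamplingParams(temperature=0.0, max_tokens=64),
)
print(outputs[0].outputs[0].text)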

@@ -100,7 +100,9 @@ def _image_token_str(model_config: ModelConfig,
     if model_type == "phi3_v":
         # Workaround since this token is not defined in the tokenizer
         return "<|image_1|>"
-    if model_type in ("blip-2", "chatglm", "fuyu", "minicpmv", "paligemma"):
+    if model_type == "minicpmv":
+        return "(<image>./</image>)"
+    if model_type in ("blip-2", "chatglm", "fuyu", "paligemma"):
         # These models do not use image tokens in the prompt
         return None
     if model_type.startswith("llava"):
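With this change, _image_token_str returns the MiniCPM-V placeholder instead of falling into the "no image token" branch, so the OpenAI-compatible server injects "(<image>./</image>)" into the prompt for each image part of a chat message. A hedged client-side sketch follows; the endpoint, API key, and image URL are placeholders, and it assumes the server was started with something like `python -m vllm.entrypoints.openai.api_server --model openbmb/MiniCPM-Llama3-V-2_5 --trust-remote-code`.

from openai import OpenAI

# Illustrative endpoint; vLLM's OpenAI-compatible server defaults to port 8000.
client = OpenAI(base_url="http://localhost:8000/v1", api_key="EMPTY")

response = client.chat.completions.create(
    model="openbmb/MiniCPM-Llama3-V-2_5",
    messages=[{
        "role": "user",
        "content": [
            {"type": "text", "text": "What is the content of this image?"},
            # The server now injects "(<image>./</image>)" for this image
            # part; before this fix, minicpmv was grouped with the models
            # that use no image token and the placeholder was omitted.
            {"type": "image_url",
             "image_url": {"url": "https://example.com/stop_sign.jpg"}},
        ],
    }],
)
print(response.choices[0].message.content)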