[CI/Build] Use vLLM client's user agent to fetch images (#23561)
Signed-off-by: DarkLight1337 <tlleungac@connect.ust.hk>
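For context: these tests previously downloaded their sample images with a bare requests call, so the request went out with the default python-requests user agent; the commit title indicates the fix is to route the download through vLLM's own fetch_image helper (from vllm.multimodal.utils), which fetches with the vLLM client's user agent and returns a PIL image directly. Below is a minimal sketch of the before/after pattern, not the test code itself; the URL is a stand-in, and the exact headers fetch_image sends are an implementation detail not shown here.

    import requests
    from PIL import Image

    from vllm.multimodal.utils import fetch_image

    # Stand-in URL, for illustration only.
    image_url = "https://example.com/sample.jpg"

    # Before: plain HTTP GET; the request carries the default
    # python-requests user agent.
    image_old = Image.open(requests.get(image_url, stream=True).raw)

    # After: vLLM's helper downloads the URL (with the vLLM client's
    # user agent) and returns a PIL.Image.Image.
    image_new = fetch_image(image_url)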
@@ -6,8 +6,6 @@ import json
 import openai
 import pytest
 import pytest_asyncio
-import requests
-from PIL import Image
 from transformers import AutoProcessor

 from vllm.multimodal.utils import encode_image_base64, fetch_image
@@ -36,7 +34,7 @@ EXPECTED_MM_BEAM_SEARCH_RES = [
     ],
     [
         "The image shows a Venn diagram with three over",
-        "The image shows a Venn diagram with three intersect",
+        "This image shows a Venn diagram with three intersect",
     ],
     [
         "This image displays a gradient of colors ranging from",
@@ -88,7 +86,7 @@ def get_hf_prompt_tokens(model_name, content, image_url):
         "role": "user",
         "content": f"{placeholder}{content}",
     }]
-    images = [Image.open(requests.get(image_url, stream=True).raw)]
+    images = [fetch_image(image_url)]

     prompt = processor.tokenizer.apply_chat_template(
         messages, tokenize=False, add_generation_prompt=True)
@@ -5,7 +5,6 @@ import json

 import pytest
 import requests
-from PIL import Image
 from transformers import AutoProcessor

 from vllm.entrypoints.openai.protocol import EmbeddingResponse
@@ -64,7 +63,7 @@ def get_hf_prompt_tokens(model_name, content, image_url):

     placeholder = "<|image_1|> "
     prompt = f"{placeholder}{content}"
-    images = [Image.open(requests.get(image_url, stream=True).raw)]
+    images = [fetch_image(image_url)]
     inputs = processor(prompt, images, return_tensors="pt")
     return inputs.input_ids.shape[1]