mirror of
https://github.com/vllm-project/vllm.git
synced 2025-10-20 14:53:52 +08:00
[Misc] fix typo in the multimodal doc (#23051)
This commit is contained in:
@@ -216,7 +216,7 @@ Instead of NumPy arrays, you can also pass `'torch.Tensor'` instances, as shown
 from vllm import LLM, SamplingParams
 from qwen_vl_utils import process_vision_info

-model_path = "Qwen/Qwen2.5-VL-3B-Instruct/"
+model_path = "Qwen/Qwen2.5-VL-3B-Instruct"
 video_path = "https://content.pexels.com/videos/free-videos.mp4"

 llm = LLM(
Reference in New Issue
Block a user