[Chore] Use max_transformers_version for Qwen-VL test (#26792)
Signed-off-by: DarkLight1337 <tlleungac@connect.ust.hk>
@@ -707,8 +707,6 @@ VLM_TEST_SETTINGS = {
         max_num_seqs=2,
         vllm_output_post_proc=model_utils.qwen_vllm_to_hf_output,
         prompt_path_encoder=model_utils.qwen_prompt_path_encoder,
-        # FIXME: https://github.com/huggingface/transformers/issues/38358
-        marks=[pytest.mark.skip("Model initialization fails")],
     ),
     "qwen2_vl": VLMTestInfo(
         models=["Qwen/Qwen2-VL-2B-Instruct"],
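The removed skip mark had disabled the Qwen-VL test entirely while the upstream transformers issue was open. For context, here is a minimal, self-contained sketch of how marks carried in a settings table typically reach pytest; this plumbing is illustrative only and is not vLLM's actual VLMTestInfo machinery:

import pytest

# Illustrative stand-in for a VLM_TEST_SETTINGS-style table; the real
# vLLM entries are VLMTestInfo objects with many more fields.
SETTINGS = {
    "qwen_vl": {"max_num_seqs": 2, "marks": [pytest.mark.skip("init fails")]},
    "qwen2_vl": {"max_num_seqs": 2, "marks": []},
}

# Each entry becomes a pytest.param; its marks decide whether the case
# runs, so deleting a skip mark re-enables the test without other changes.
PARAMS = [
    pytest.param(name, cfg, marks=cfg["marks"], id=name)
    for name, cfg in SETTINGS.items()
]

@pytest.mark.parametrize("name,cfg", PARAMS)
def test_model_settings(name, cfg):
    assert cfg["max_num_seqs"] > 0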
@@ -752,6 +752,8 @@ _MULTIMODAL_EXAMPLE_MODELS = {
         "Qwen/Qwen-VL",
         extras={"chat": "Qwen/Qwen-VL-Chat"},
         trust_remote_code=True,
+        max_transformers_version="4.53.3",
+        transformers_version_reason="Use of deprecated imports which have been removed.",  # noqa: E501
         hf_overrides={"architectures": ["QwenVLForConditionalGeneration"]},
     ),
     "Qwen2AudioForConditionalGeneration": _HfExamplesInfo(
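The new fields replace the blanket skip with a version cap: the Qwen-VL example is exercised only when the installed transformers is at most 4.53.3, and the reason string explains why newer releases break it. A minimal sketch of how such a gate can be implemented, assuming packaging and pytest are available; this is an assumed illustration, not the actual _HfExamplesInfo logic:

import pytest
import transformers
from packaging.version import Version

def check_transformers_version(max_version, reason=None):
    """Skip the calling test when installed transformers exceeds the cap.

    Assumed illustration of a max_transformers_version gate; vLLM's real
    _HfExamplesInfo check may differ in detail.
    """
    if max_version is None:
        return
    if Version(transformers.__version__) > Version(max_version):
        msg = f"Requires transformers<={max_version}"
        if reason:
            msg += f" ({reason})"
        pytest.skip(msg)

# Usage inside a test body, mirroring the registry entry above:
# check_transformers_version(
#     "4.53.3", "Use of deprecated imports which have been removed.")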