[Chore] Use max_transformers_version for Qwen-VL test (#26792)

Signed-off-by: DarkLight1337 <tlleungac@connect.ust.hk>
Author: Cyrus Leung
Date: 2025-10-14 18:03:46 +08:00
Committed by: GitHub
Parent: 7e6edb1469
Commit: d1d063a588
2 changed files with 2 additions and 2 deletions


@@ -707,8 +707,6 @@ VLM_TEST_SETTINGS = {
         max_num_seqs=2,
         vllm_output_post_proc=model_utils.qwen_vllm_to_hf_output,
         prompt_path_encoder=model_utils.qwen_prompt_path_encoder,
-        # FIXME: https://github.com/huggingface/transformers/issues/38358
-        marks=[pytest.mark.skip("Model initialization fails")],
     ),
     "qwen2_vl": VLMTestInfo(
         models=["Qwen/Qwen2-VL-2B-Instruct"],


@@ -752,6 +752,8 @@ _MULTIMODAL_EXAMPLE_MODELS = {
         "Qwen/Qwen-VL",
         extras={"chat": "Qwen/Qwen-VL-Chat"},
         trust_remote_code=True,
+        max_transformers_version="4.53.3",
+        transformers_version_reason="Use of deprecated imports which have been removed.", # noqa: E501
         hf_overrides={"architectures": ["QwenVLForConditionalGeneration"]},
     ),
     "Qwen2AudioForConditionalGeneration": _HfExamplesInfo(