[CI/Build] Fix tensorizer test for load_format change (#22583)

Signed-off-by: 22quinn <33176974+22quinn@users.noreply.github.com>
Author: 22quinn
Date: 2025-08-10 19:30:00 -07:00
Committed by: GitHub
Parent: 06da44f0cb
Commit: b799f4b9ea

3 changed files with 3 additions and 4 deletions


@@ -426,7 +426,6 @@ steps:
 - label: Tensorizer Test # 11min
   mirror_hardwares: [amdexperimental]
-  soft_fail: true
   source_file_dependencies:
   - vllm/model_executor/model_loader
   - tests/tensorizer_loader
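
Note: in Buildkite pipeline config, soft_fail: true lets a step fail without failing the build; removing it here makes the Tensorizer Test a blocking check again now that the test passes.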


@@ -44,7 +44,7 @@ def model_uri(tmp_dir):
 def tensorize_model_and_lora(tmp_dir, model_uri):
     tensorizer_config = TensorizerConfig(tensorizer_uri=model_uri,
                                          lora_dir=tmp_dir)
-    args = EngineArgs(model=MODEL_NAME, device="cuda")
+    args = EngineArgs(model=MODEL_NAME)
     tensorize_lora_adapter(LORA_PATH, tensorizer_config)
     tensorize_vllm_model(args, tensorizer_config)
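
Context for the fixture change above: the test no longer pins device="cuda" and lets vLLM resolve the device itself. A minimal sketch of the serialization flow these helpers drive, assuming vLLM's tensorizer API as exercised in this test (the model name and URI below are placeholders, not values from the diff):

    from vllm.engine.arg_utils import EngineArgs
    from vllm.model_executor.model_loader.tensorizer import (
        TensorizerConfig, tensorize_vllm_model)

    # Placeholder values; the real test derives these from pytest fixtures.
    config = TensorizerConfig(tensorizer_uri="/tmp/model.tensors")

    # No device argument: the target device is detected automatically.
    args = EngineArgs(model="facebook/opt-125m")
    tensorize_vllm_model(args, config)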


@@ -166,7 +166,7 @@ def test_load_without_tensorizer_load_format(vllm_runner, capfd, model_ref):
         combined_output = out + err
         assert ("ValueError: Model loader extra config "
                 "is not supported for load "
-                "format LoadFormat.AUTO") in combined_output
+                "format auto") in combined_output
     finally:
         del model
         gc.collect()
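
The assertion update tracks an upstream move from an enum-valued load_format to a plain lowercase string. A small illustrative sketch of why the interpolated error text changed (this LoadFormat enum is a stand-in, not vLLM's exact definition):

    import enum

    class LoadFormat(enum.Enum):
        AUTO = "auto"

    # A plain Enum member stringifies with its class name, so the old error
    # message read "format LoadFormat.AUTO"; a raw string reads "format auto".
    assert f"format {LoadFormat.AUTO}" == "format LoadFormat.AUTO"
    assert f"format {LoadFormat.AUTO.value}" == "format auto"
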
@@ -186,7 +186,7 @@ def test_raise_value_error_on_invalid_load_format(vllm_runner, capfd,
         combined_output = out + err
         assert ("ValueError: Model loader extra config is not supported "
-                "for load format LoadFormat.SAFETENSORS") in combined_output
+                "for load format safetensors") in combined_output
     finally:
         del model
         gc.collect()
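
Both tests share the same capture pattern: provoke the failure, read process-level output with pytest's capfd fixture, and assert on the combined text. A self-contained sketch of that pattern (the error line is simulated here rather than raised by a real loader):

    import sys

    def test_error_mentions_plain_load_format(capfd):
        # Simulate the loader writing the error that the real test provokes.
        print("ValueError: Model loader extra config is not supported "
              "for load format safetensors", file=sys.stderr)
        out, err = capfd.readouterr()
        combined_output = out + err
        assert "for load format safetensors" in combined_output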