Mirror of https://github.com/vllm-project/vllm.git
[Fix] check to make sure processor has chat templates (#18047)
Signed-off-by: Aaron Pham <contact@aarnphm.xyz>
@@ -22,7 +22,7 @@ def test_bad_callable():
     pass_manager.configure(config)
 
     with pytest.raises(AssertionError):
-        pass_manager.add(simple_callable)  # noqa, type wrong on purpose
+        pass_manager.add(simple_callable)
 
 
 # Pass that inherits from InductorPass
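The hunk above is from a pass-manager test: add() is expected to assert that it receives a real InductorPass object, so handing it a bare function must raise AssertionError. Below is a minimal sketch of that guard, assuming nothing beyond the test's visible behavior; SimplePassManager and InductorPassLike are illustrative names, not vLLM's PostGradPassManager/InductorPass.

# Minimal sketch of the type guard exercised by test_bad_callable.
# SimplePassManager / InductorPassLike are illustrative stand-ins.


class InductorPassLike:
    """Stand-in for an InductorPass-style base class."""

    def __call__(self, graph) -> None:
        raise NotImplementedError


class SimplePassManager:
    """Stand-in for a pass manager with a type-checked add()."""

    def __init__(self):
        self.passes = []

    def add(self, pass_):
        # Reject bare callables; only real pass objects may be registered.
        assert isinstance(pass_, InductorPassLike)
        self.passes.append(pass_)


def simple_callable(graph):
    return None


manager = SimplePassManager()
try:
    manager.add(simple_callable)  # wrong type on purpose
except AssertionError:
    print("bare callable rejected, as the test expects")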
@ -16,7 +16,7 @@ if is_torch_equal_or_newer("2.6"):
|
||||
from torch._inductor.custom_graph_pass import CustomGraphPass
|
||||
else:
|
||||
# CustomGraphPass is not present in 2.5 or lower, import our version
|
||||
from .torch25_custom_graph_pass import ( # noqa: yapf
|
||||
from .torch25_custom_graph_pass import ( # noqa: E501
|
||||
Torch25CustomGraphPass as CustomGraphPass)
|
||||
|
||||
_pass_context = None
|
||||
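This hunk only swaps the lint directive on the import (yapf to E501), but the surrounding pattern is the interesting part: a single CustomGraphPass name is bound either from torch 2.6+ or from a vendored 2.5 shim, gated on vLLM's is_torch_equal_or_newer helper. A generic sketch of that version-gated import follows; the simplified version check stands in for the helper, and the fallback class body is illustrative.

# Generic version-gated import: expose one public name, choosing the
# implementation by installed dependency version. The parsing below is
# a simplified stand-in for vLLM's is_torch_equal_or_newer helper.
import importlib.metadata


def _major_minor(ver: str) -> tuple:
    parts = []
    for piece in ver.split(".")[:2]:
        digits = "".join(ch for ch in piece if ch.isdigit())
        parts.append(int(digits) if digits else 0)
    return tuple(parts)


try:
    _TORCH_IS_26 = _major_minor(importlib.metadata.version("torch")) >= (2, 6)
except importlib.metadata.PackageNotFoundError:
    _TORCH_IS_26 = False

if _TORCH_IS_26:
    from torch._inductor.custom_graph_pass import CustomGraphPass
else:
    # Fallback shim mirroring the upstream interface (illustrative body).
    class CustomGraphPass:
        def __call__(self, graph) -> None:
            raise NotImplementedError

        def uuid(self):
            raise NotImplementedError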
@@ -349,11 +349,11 @@ def resolve_hf_chat_template(
             trust_remote_code=model_config.trust_remote_code,
         )
         if isinstance(processor, ProcessorMixin) and \
+                hasattr(processor, 'chat_template') and \
                 processor.chat_template is not None:
             return processor.chat_template
     except Exception:
-        logger.debug("Failed to load AutoProcessor chat template for %s",
-                     tokenizer.name_or_path, exc_info=True)
+        logger.debug("Failed to load AutoProcessor chat template for %s", tokenizer.name_or_path, exc_info=True)  # noqa: E501
 
     # 3rd priority: AutoTokenizer chat template
     try:
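The substantive fix is the added hasattr guard in resolve_hf_chat_template: some ProcessorMixin subclasses never define chat_template, so reading processor.chat_template unconditionally would raise AttributeError and take the surrounding exception path instead of falling through cleanly to the next template source. A standalone sketch of the guarded lookup; FakeProcessor and get_processor_chat_template are illustrative names, and the real function takes tokenizer and model_config arguments.

# Sketch of the guarded chat-template lookup this commit introduces.
# FakeProcessor stands in for a HF ProcessorMixin subclass that does
# not define `chat_template`; the helper name is illustrative.
from typing import Optional


class FakeProcessor:
    pass  # deliberately no chat_template attribute


def get_processor_chat_template(processor) -> Optional[str]:
    # hasattr first: without it, `processor.chat_template` raises
    # AttributeError for processors that never set the attribute.
    if hasattr(processor, "chat_template") and \
            processor.chat_template is not None:
        return processor.chat_template
    return None


assert get_processor_chat_template(FakeProcessor()) is None

proc = FakeProcessor()
proc.chat_template = "{% for m in messages %}{{ m['content'] }}{% endfor %}"
assert get_processor_chat_template(proc) == proc.chat_template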