[Fix] check to make sure processor has chat templates (#18047)

Signed-off-by: Aaron Pham <contact@aarnphm.xyz>
This commit is contained in:
Aaron Pham
2025-05-13 06:04:10 -04:00
committed by GitHub
parent 98fcba1575
commit cb528d0585
3 changed files with 4 additions and 4 deletions

View File

@@ -22,7 +22,7 @@ def test_bad_callable():
pass_manager.configure(config)
with pytest.raises(AssertionError):
pass_manager.add(simple_callable) # noqa, type wrong on purpose
pass_manager.add(simple_callable)
# Pass that inherits from InductorPass

View File

@@ -16,7 +16,7 @@ if is_torch_equal_or_newer("2.6"):
from torch._inductor.custom_graph_pass import CustomGraphPass
else:
# CustomGraphPass is not present in 2.5 or lower, import our version
from .torch25_custom_graph_pass import ( # noqa: yapf
from .torch25_custom_graph_pass import ( # noqa: E501
Torch25CustomGraphPass as CustomGraphPass)
_pass_context = None

View File

@@ -349,11 +349,11 @@ def resolve_hf_chat_template(
trust_remote_code=model_config.trust_remote_code,
)
if isinstance(processor, ProcessorMixin) and \
hasattr(processor, 'chat_template') and \
processor.chat_template is not None:
return processor.chat_template
except Exception:
logger.debug("Failed to load AutoProcessor chat template for %s",
tokenizer.name_or_path, exc_info=True)
logger.debug("Failed to load AutoProcessor chat template for %s", tokenizer.name_or_path, exc_info=True) # noqa: E501
# 3rd priority: AutoTokenizer chat template
try: