[Bugfix] Fix basic models tests hanging due to mm processor creation (#22571)

Signed-off-by: Isotr0py <mozf@mail2.sysu.edu.cn>
This commit is contained in:
Isotr0py
2025-08-10 02:42:21 +08:00
committed by GitHub
parent 5a16fa614c
commit fbd8595c5c

View File

@ -138,8 +138,8 @@ class MultiModalRegistry:
if not model_config.is_multimodal_model:
return False
processor = self.create_processor(model_config, disable_cache=False)
supported_modalities = processor.info.get_supported_mm_limits()
info = self._create_processing_info(model_config, tokenizer=None)
supported_modalities = info.get_supported_mm_limits()
mm_config = model_config.get_multimodal_config()
@ -278,6 +278,26 @@ class MultiModalRegistry:
model_cls, _ = get_model_architecture(model_config)
return model_cls
def _create_processing_ctx(
    self,
    model_config: "ModelConfig",
    tokenizer: Optional[AnyTokenizer] = None,
) -> InputProcessingContext:
    """Build an :class:`InputProcessingContext` for ``model_config``.

    If no tokenizer is supplied and the model config does not request
    skipping tokenizer initialization, a cached tokenizer is resolved
    from the model config; otherwise the given (possibly ``None``)
    tokenizer is used as-is.
    """
    # Only resolve a tokenizer when the caller didn't provide one and
    # the config allows tokenizer initialization.
    needs_tokenizer = tokenizer is None and not model_config.skip_tokenizer_init
    if needs_tokenizer:
        tokenizer = cached_tokenizer_from_config(model_config)
    return InputProcessingContext(model_config, tokenizer)
def _create_processing_info(
    self,
    model_config: "ModelConfig",
    *,
    tokenizer: Optional[AnyTokenizer] = None,
) -> BaseProcessingInfo:
    """Create the processing-info object for the model's registered
    multimodal processor factory.

    Looks up the model class for ``model_config``, fetches its
    registered processor factories, and instantiates the factory's
    ``info`` with a freshly built processing context.
    """
    # Resolve the registered factories for this model architecture.
    registered = self._processor_factories[self._get_model_cls(model_config)]
    # Build the context (tokenizer resolved lazily inside the helper)
    # and hand it to the info factory.
    context = self._create_processing_ctx(model_config, tokenizer)
    return registered.info(context)
def create_processor(
self,
model_config: "ModelConfig",
@ -291,15 +311,13 @@ class MultiModalRegistry:
if not model_config.is_multimodal_model:
raise ValueError(f"{model_config.model} is not a multimodal model")
if tokenizer is None and not model_config.skip_tokenizer_init:
tokenizer = cached_tokenizer_from_config(model_config)
if disable_cache is None:
disable_cache = not model_config.enable_mm_processor_cache
model_cls = self._get_model_cls(model_config)
factories = self._processor_factories[model_cls]
ctx = InputProcessingContext(model_config, tokenizer)
ctx = self._create_processing_ctx(model_config, tokenizer)
cache = None if disable_cache else self._get_processor_cache(
model_config)