Support for NemotronH Nano VLM (#23644)

Signed-off-by: Daniel Afrimi <danielafrimi8@gmail.com>
This commit is contained in:
danielafrimi
2025-09-10 16:10:06 +03:00
committed by GitHub
parent 8b83b93739
commit 72d30108a0
4 changed files with 1400 additions and 1 deletion

View File

@@ -515,6 +515,9 @@ _MULTIMODAL_EXAMPLE_MODELS = {
trust_remote_code=True),
"Llama_Nemotron_Nano_VL" : _HfExamplesInfo("nvidia/Llama-3.1-Nemotron-Nano-VL-8B-V1", # noqa: E501
trust_remote_code=True),
"NemotronH_Nano_VL": _HfExamplesInfo("nano_vl_dummy",
is_available_online=False,
trust_remote_code=True),
"Ovis": _HfExamplesInfo("AIDC-AI/Ovis2-1B", trust_remote_code=True,
max_transformers_version="4.53",
transformers_version_reason="HF model is not compatible", # noqa: E501

View File

@@ -1552,7 +1552,7 @@ class ModelConfig:
for bc in block_configs[start:end])
else:
# Hybrid model Jamba
layers_block_type_value = getattr(self.hf_config,
layers_block_type_value = getattr(self.hf_text_config,
"layers_block_type", None)
if layers_block_type_value is not None:
if hasattr(self.hf_text_config,

File diff suppressed because it is too large Load Diff

View File

@@ -223,6 +223,7 @@ _MULTIMODAL_MODELS = {
"GraniteSpeechForConditionalGeneration": ("granite_speech", "GraniteSpeechForConditionalGeneration"), # noqa: E501
"H2OVLChatModel": ("h2ovl", "H2OVLChatModel"),
"InternVLChatModel": ("internvl", "InternVLChatModel"),
"NemotronH_Nano_VL": ("nano_nemotron_vl", "NemotronH_Nano_VL"),
"InternS1ForConditionalGeneration": ("interns1", "InternS1ForConditionalGeneration"), # noqa: E501
"InternVLForConditionalGeneration": ("interns1", "InternS1ForConditionalGeneration"), # noqa: E501
"Idefics3ForConditionalGeneration":("idefics3","Idefics3ForConditionalGeneration"),