Use hidden_size_per_head as head_size fallback (#24221)
Signed-off-by: nopperl <54780682+nopperl@users.noreply.github.com>
@@ -1426,6 +1426,11 @@ class ModelConfig:
         if getattr(self.hf_text_config, "head_dim", None) is not None:
             return self.hf_text_config.head_dim

+        # NOTE: Some models (such as PLaMo2.1) use `hidden_size_per_head`
+        if getattr(self.hf_text_config, "hidden_size_per_head",
+                   None) is not None:
+            return self.hf_text_config.hidden_size_per_head
+
         # FIXME(woosuk): This may not be true for all models.
         return (self.hf_text_config.hidden_size //
                 self.hf_text_config.num_attention_heads)
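For context, the patch extends the head-size fallback chain: prefer an explicit `head_dim`, then `hidden_size_per_head`, and only then derive the value from `hidden_size // num_attention_heads`. Below is a minimal standalone sketch of that logic; `get_head_size` is a hypothetical free function mirroring the patched method, and the `SimpleNamespace` config values are illustrative, not taken from any real model.

    from types import SimpleNamespace

    def get_head_size(hf_text_config) -> int:
        # 1. Prefer an explicit head_dim if the HF config provides one.
        if getattr(hf_text_config, "head_dim", None) is not None:
            return hf_text_config.head_dim
        # 2. Some models (such as PLaMo2.1) expose hidden_size_per_head
        #    instead of head_dim; use it as the next fallback.
        if getattr(hf_text_config, "hidden_size_per_head", None) is not None:
            return hf_text_config.hidden_size_per_head
        # 3. Otherwise assume hidden_size divides evenly across heads
        #    (as the FIXME notes, this may not hold for all models).
        return hf_text_config.hidden_size // hf_text_config.num_attention_heads

    # PLaMo2.1-style config: no head_dim, but hidden_size_per_head is set,
    # so the new fallback is taken instead of the hidden_size division.
    cfg = SimpleNamespace(hidden_size_per_head=128,
                          hidden_size=4096,
                          num_attention_heads=32)
    assert get_head_size(cfg) == 128

Before this change, a config like the one above would have silently fallen through to `hidden_size // num_attention_heads`, which only coincides with the true head size when the heads evenly partition the hidden dimension.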