[Bugfix] Allow shared_experts skip quantization for DeepSeekV2/V3 (#14100)

Signed-off-by: mgoin <mgoin64@gmail.com>
This commit is contained in:
Michael Goin
2025-03-03 16:20:24 -05:00
committed by GitHub
parent ae122b1cbd
commit 2b04c209ee

View File

@@ -145,6 +145,7 @@ class DeepseekV2MoE(nn.Module):
hidden_act=config.hidden_act,
quant_config=quant_config,
reduce_results=False,
prefix=f"{prefix}.shared_experts",
)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: