[Bugfix] Fix Phi3.5 mini and MoE LoRA inference (#8571)

Amit Garg
2024-09-19 17:54:02 -07:00
committed by GitHub
parent de6f90a13d
commit 18ae428a0d
3 changed files with 22 additions and 1 deletion

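The first hunk re-points the `Phi3ForCausalLM` architecture in the model registry (likely `vllm/model_executor/models/registry.py`) from the generic Llama implementation to a new dedicated `phi3` module: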

@@ -50,7 +50,7 @@ _GENERATION_MODELS = {
     "OrionForCausalLM": ("orion", "OrionForCausalLM"),
     "PersimmonForCausalLM": ("persimmon", "PersimmonForCausalLM"),
     "PhiForCausalLM": ("phi", "PhiForCausalLM"),
-    "Phi3ForCausalLM": ("llama", "LlamaForCausalLM"),
+    "Phi3ForCausalLM": ("phi3", "Phi3ForCausalLM"),
     "PhiMoEForCausalLM": ("phimoe", "PhiMoEForCausalLM"),
     "Qwen2ForCausalLM": ("qwen2", "Qwen2ForCausalLM"),
     "Qwen2MoeForCausalLM": ("qwen2_moe", "Qwen2MoeForCausalLM"),

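The second hunk adds the dedicated Phi3 model file (presumably `vllm/model_executor/models/phi3.py`), a thin subclass of the Llama implementation that only overrides the LoRA packing metadata: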

@@ -0,0 +1,17 @@
+# coding=utf-8
+# Adapted from llama.py
+"""Inference-only Phi3 model code, inheriting from llama.py."""
+
+from vllm.model_executor.models.llama import LlamaForCausalLM
+
+
+class Phi3ForCausalLM(LlamaForCausalLM):
+
+    packed_modules_mapping = {
+        "qkv_proj": [
+            "qkv_proj",
+        ],
+        "gate_up_proj": [
+            "gate_up_proj",
+        ],
+    }
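Phi-3 checkpoints ship the attention and MLP projections already fused (`qkv_proj`, `gate_up_proj`), so each packed module maps to itself; this tells the LoRA machinery not to expect separate `q_proj`/`k_proj`/`v_proj` or `gate_proj`/`up_proj` adapter weights, as it would for a stock Llama model. With the fix in place, LoRA inference can be exercised through vLLM's public API; the model name and adapter path below are illustrative placeholders:

```python
from vllm import LLM, SamplingParams
from vllm.lora.request import LoRARequest

# Placeholder model and adapter path; substitute your own.
llm = LLM(model="microsoft/Phi-3.5-mini-instruct", enable_lora=True)
outputs = llm.generate(
    ["Write a haiku about GPUs."],
    SamplingParams(max_tokens=64),
    lora_request=LoRARequest("my_adapter", 1, "/path/to/phi3_lora"),
)
print(outputs[0].outputs[0].text)
```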

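The third hunk extends `supported_lora_modules` on `PhiMoEForCausalLM` (in `vllm/model_executor/models/phimoe.py`) so adapters may also target the MoE expert projections (`w1`, `w2`, `w3`) and the router `gate`: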

@@ -491,6 +491,10 @@ class PhiMoEForCausalLM(nn.Module, SupportsLoRA):
         "o_proj",
         "embed_tokens",
         "lm_head",
+        "w1",
+        "w2",
+        "w3",
+        "gate",
     ]
     embedding_modules = {
         "embed_tokens": "input_embeddings",