[Perf] Reduce peak memory usage of llama (#10339)

Signed-off-by: andoorve <37849411+andoorve@users.noreply.github.com>
Author: Murali Andoorveedu
Date: 2024-11-14 16:38:20 -08:00
Committed by: GitHub
Parent: 4a18fd14ba
Commit: b2e0ad3b59


@@ -90,8 +90,8 @@ class LlamaMLP(nn.Module):
         self.act_fn = SiluAndMul()
 
     def forward(self, x):
-        gate_up, _ = self.gate_up_proj(x)
-        x = self.act_fn(gate_up)
+        x, _ = self.gate_up_proj(x)
+        x = self.act_fn(x)
         x, _ = self.down_proj(x)
         return x
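
Why this lowers peak memory: in the old forward, the name gate_up keeps the large fused gate/up projection output alive until the function returns, so that tensor still occupies memory while down_proj allocates its output. Rebinding x drops the last reference to each intermediate as soon as the next result exists, letting PyTorch's caching allocator reuse that space. Below is a minimal stand-in sketch, not the vLLM LlamaMLP itself: it uses plain torch.nn.Linear layers, an ordinary silu_and_mul function in place of SiluAndMul, and assumed sizes, and shows both variants side by side.

import torch
import torch.nn as nn
import torch.nn.functional as F


def silu_and_mul(gate_up: torch.Tensor) -> torch.Tensor:
    # Stand-in for vLLM's SiluAndMul: silu(gate) * up over the fused projection.
    gate, up = gate_up.chunk(2, dim=-1)
    return F.silu(gate) * up


class MLPSketch(nn.Module):
    # Hypothetical stand-in for LlamaMLP; layer types and sizes are assumptions.
    def __init__(self, hidden_size: int = 4096, intermediate_size: int = 14336):
        super().__init__()
        self.gate_up_proj = nn.Linear(hidden_size, 2 * intermediate_size, bias=False)
        self.down_proj = nn.Linear(intermediate_size, hidden_size, bias=False)

    def forward_old(self, x: torch.Tensor) -> torch.Tensor:
        # gate_up stays bound until the function returns, so the large fused
        # projection output is still alive while down_proj allocates its output.
        gate_up = self.gate_up_proj(x)
        x = silu_and_mul(gate_up)
        return self.down_proj(x)

    def forward_new(self, x: torch.Tensor) -> torch.Tensor:
        # Rebinding x drops the last reference to the fused projection output as
        # soon as the activation result exists, so its memory can be reused
        # before down_proj runs, lowering the peak.
        x = self.gate_up_proj(x)
        x = silu_and_mul(x)
        return self.down_proj(x)


if __name__ == "__main__":
    mlp = MLPSketch()
    h = torch.randn(2, 16, 4096)
    # Both variants compute the same result; only the lifetime of the
    # intermediate tensors differs.
    assert torch.allclose(mlp.forward_old(h), mlp.forward_new(h))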