mirror of https://github.com/vllm-project/vllm.git
[Bug] Fix usage of .transpose() and .view() consecutively. (#11979)
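
torch.Tensor.view() never copies data, so it requires the requested shape to be expressible with the tensor's existing strides. transpose() returns a non-contiguous view that merely swaps strides, so calling .view() directly on its result can raise a RuntimeError ("view size is not compatible with input tensor's size and stride"). .reshape() returns a view when the strides permit and falls back to a contiguous copy otherwise, which makes it the safe choice after a transpose.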
@@ -230,7 +230,7 @@ class MultiHeadAttention(nn.Module):
                 value,
                 scale=self.scale)
             out = out.transpose(1, 2)
-        return out.view(bsz, q_len, -1)
+        return out.reshape(bsz, q_len, -1)
 
 
 def unified_attention(
@@ -271,7 +271,7 @@ class InternSdpaAttention(nn.Module):
         v = v.transpose(1, 2)
 
         x = F.scaled_dot_product_attention(q, k, v, scale=self.scale)
-        x = x.transpose(1, 2).view(B, N, -1)
+        x = x.transpose(1, 2).reshape(B, N, -1)
 
         x = self.proj(x)
         return x
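
The failure mode is easy to reproduce outside vllm. A minimal sketch; the tensor shapes below are illustrative, not taken from the patched code:

import torch

# A contiguous 4-D attention-style tensor, e.g. (batch, heads, seq, head_dim).
x = torch.randn(2, 4, 8, 16)

# transpose() swaps strides without moving any data, so the result is
# a non-contiguous view of the same storage.
y = x.transpose(1, 2)  # shape (2, 8, 4, 16)
assert not y.is_contiguous()

# .view() must reuse the existing strides; merging the last two dims of a
# transposed tensor is impossible without a copy, so this raises.
try:
    y.view(2, 8, -1)
except RuntimeError as err:
    print(f"view() failed: {err}")

# .reshape() returns a view when strides permit and copies otherwise,
# so it succeeds here (at the cost of one contiguous copy).
z = y.reshape(2, 8, -1)
print(z.shape)  # torch.Size([2, 8, 64])

For tensors that are already contiguous, reshape() behaves exactly like view() and returns a view with no copy, so the patch only adds a copy in the cases where view() would have thrown.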