mirror of https://github.com/vllm-project/vllm.git
[Bug] Fix usage of .transpose() and .view() consecutively. (#11979)
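In PyTorch, Tensor.view() requires the tensor's memory layout to be compatible with the requested shape, and Tensor.transpose() returns a non-contiguous view of the underlying storage, so calling .view() directly on the result of .transpose() can raise a RuntimeError. Tensor.reshape() returns a view when possible and falls back to a copy otherwise, which makes it safe in this pattern.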
@@ -230,7 +230,7 @@ class MultiHeadAttention(nn.Module):
                                              value,
                                              scale=self.scale)
         out = out.transpose(1, 2)
-        return out.view(bsz, q_len, -1)
+        return out.reshape(bsz, q_len, -1)


 def unified_attention(
@@ -271,7 +271,7 @@ class InternSdpaAttention(nn.Module):
         v = v.transpose(1, 2)

         x = F.scaled_dot_product_attention(q, k, v, scale=self.scale)
-        x = x.transpose(1, 2).view(B, N, -1)
+        x = x.transpose(1, 2).reshape(B, N, -1)

         x = self.proj(x)
         return x
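A minimal standalone sketch of the failure mode this commit fixes, assuming only a stock PyTorch install; the tensor shapes here are illustrative and not taken from vLLM:

import torch

# Attention output laid out as (batch, heads, seq_len, head_dim).
out = torch.randn(2, 8, 16, 64)

# transpose() swaps strides instead of moving data, so the result is a
# non-contiguous view of the same storage.
t = out.transpose(1, 2)           # shape (2, 16, 8, 64)
assert not t.is_contiguous()

# view() cannot merge the last two dimensions here because their strides
# are incompatible; PyTorch raises a RuntimeError.
try:
    t.view(2, 16, -1)
except RuntimeError as err:
    print("view() failed:", err)

# reshape() returns a view when it can and silently copies when it must,
# so it always succeeds after a transpose().
merged = t.reshape(2, 16, -1)
print(merged.shape)               # torch.Size([2, 16, 512])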