Mirror of https://github.com/vllm-project/vllm-ascend.git (synced 2025-10-21 06:10:33 +08:00)
### What this PR does / why we need it?
See RFC: https://github.com/vllm-project/vllm-ascend/issues/2470

This PR adds a new KV connector for layer-wise KV transfer.

### Does this PR introduce _any_ user-facing change?
Yes, a new KV connector is added. Users can now use the layer-wise KV transfer feature.

### How was this patch tested?
- vLLM version: v0.11.0rc3
- vLLM main: https://github.com/vllm-project/vllm/commit/releases/v0.11.0

---------

Signed-off-by: leichao.lc <leichao139636@163.com>
Signed-off-by: CaveNightingale <2859066733@qq.com>
Signed-off-by: nwpu-zxr <zhouxuerong2@huawei.com>
Signed-off-by: wangxiaoteng <wangxiaoteng@huawei.com>
Signed-off-by: hanxinlong <50882499@qq.com>
Signed-off-by: liziyu <liziyu16@huawei.com>

Co-authored-by: CaveNightingale <2859066733@qq.com>
Co-authored-by: nwpu-zxr <zhouxuerong2@huawei.com>
Co-authored-by: wangxiaoteng <wangxiaoteng@huawei.com>
Co-authored-by: hanxinlong <50882499@qq.com>
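For reference, a minimal sketch of how a KV connector is typically enabled through vLLM's `KVTransferConfig`. The connector name used here is a placeholder, not necessarily the name this PR registers, and `kv_role` depends on whether the instance acts as the prefill (producer) or decode (consumer) side.

```python
# Hedged sketch: field names follow vLLM's KVTransferConfig; the connector
# name "LayerwiseKVConnector" is a placeholder for the connector added here.
from vllm import LLM
from vllm.config import KVTransferConfig

llm = LLM(
    model="Qwen/Qwen2.5-7B-Instruct",  # any model; chosen only for illustration
    kv_transfer_config=KVTransferConfig(
        kv_connector="LayerwiseKVConnector",  # placeholder name
        kv_role="kv_producer",  # prefill side; use "kv_consumer" on decode
    ),
)
```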
48 lines
1.7 KiB
Python
import torch
import torch.distributed as dist

from vllm_ascend.distributed.parallel_state import get_p_tp_group


def kv_alltoall_and_rearrange(pd_tp_ratio: int, key: torch.Tensor,
                              value: torch.Tensor):
    """All-to-all the key/value caches across the prefill TP group and
    rearrange the results; returns (None, None) when pd_tp_ratio <= 1."""
    if pd_tp_ratio <= 1:
        # No prefill/decode TP mismatch, so no redistribution is needed.
        return None, None
    elif key is None or value is None:
        raise ValueError("key or value is None")
    k_output = alltoall_and_rearrange(pd_tp_ratio, key)
    v_output = alltoall_and_rearrange(pd_tp_ratio, value)
    return k_output, v_output


def alltoall_and_rearrange(tp_ratio: int, input_tensor: torch.Tensor):
    num_kv_heads = input_tensor.size(1)
    output_tensor = torch.zeros_like(input_tensor)
    # Exchange equal-sized chunks of dim 0 with the other ranks in the
    # prefill TP group.
    dist.all_to_all_single(output_tensor,
                           input_tensor,
                           group=get_p_tp_group().device_group)
    # Drop the local references early so the buffers can be reclaimed.
    input_tensor = 0
    result = rearrange_output(output_tensor, tp_ratio, num_kv_heads)
    output_tensor = 0
    return result


def rearrange_output(base_output: torch.Tensor, cut_num: int,
                     num_kv_heads: int):
    """Interleave the cut_num chunks that all_to_all_single concatenated
    along dim 0, keeping the (tokens, num_kv_heads, head_size) layout."""
    size_0 = base_output.size(0)
    if size_0 % cut_num != 0:
        raise ValueError(
            f"The size of dim 0 [{size_0}] must be divisible by the cut_num [{cut_num}]"
        )
    chunk_size = size_0 // cut_num
    reshaped = base_output.view(cut_num, chunk_size, -1)
    transposed = reshaped.transpose(0, 1)
    return transposed.contiguous().view(size_0, num_kv_heads, -1)


def align_memory(tensor: torch.Tensor, alignment: int) -> torch.Tensor:
    """Return a view of `tensor` that starts at an address aligned to
    `alignment` bytes."""
    data_ptr = tensor.data_ptr()
    aligned_addr = (data_ptr + alignment - 1) // alignment * alignment
    offset = (aligned_addr - data_ptr) // tensor.element_size()
    return tensor[int(offset):]
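Below is a small, self-contained usage sketch for the helpers that do not require an initialized process group (`kv_alltoall_and_rearrange` and `alltoall_and_rearrange` additionally need torch.distributed and the prefill TP group set up). The shapes and values are illustrative only.

# Illustrative standalone check of the pure-tensor helpers; shapes are arbitrary.
if __name__ == "__main__":
    cut_num, tokens_per_chunk, num_kv_heads, head_size = 2, 3, 4, 8
    kv = torch.arange(cut_num * tokens_per_chunk * num_kv_heads * head_size,
                      dtype=torch.float32).view(cut_num * tokens_per_chunk,
                                                num_kv_heads, head_size)
    out = rearrange_output(kv, cut_num, num_kv_heads)
    # Row r of the result comes from chunk (r % cut_num), row (r // cut_num).
    assert out.shape == kv.shape
    assert torch.equal(out[1], kv[tokens_per_chunk])

    buf = torch.empty(1024, dtype=torch.uint8)
    aligned = align_memory(buf, 512)
    assert aligned.data_ptr() % 512 == 0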