[CI/UT][PD Disaggregation] Initialize PD Disaggregation UT (#889)

Initialize the PD Disaggregation UT: add an end-to-end test that launches a prefill instance, a decode instance, and a proxy for disaggregated prefill, and wire it into CI.

---------

Signed-off-by: MengqingCao <cmq0113@163.com>
Author: Mengqing Cao
Date: 2025-05-29 10:17:12 +08:00
Committed by: GitHub
Parent: f6e5decc10
Commit: 6eddbd2521

9 changed files with 327 additions and 17 deletions

View File

@@ -55,12 +55,6 @@ jobs:
       options: >-
         --device /dev/davinci0
         --device /dev/davinci1
-        --device /dev/davinci2
-        --device /dev/davinci3
-        --device /dev/davinci4
-        --device /dev/davinci5
-        --device /dev/davinci6
-        --device /dev/davinci7
         --device /dev/davinci_manager
         --device /dev/devmm_svm
         --device /dev/hisi_hdc
@@ -105,3 +99,7 @@ jobs:
         run: |
           pip install -r requirements-dev.txt
           pip install -v -e .
+      - name: Run vllm-project/vllm-ascend PD Disaggregation test
+        run: |
+          pytest -sv tests/e2e/pd_disaggreate/test_pd_e2e.py

View File

@@ -13,7 +13,7 @@ import time
 from multiprocessing import Event, Process

 kv_connector_extra_config = {
-    "prompt_device_ips": ["1.2.3.1", "1.2.3.2"],
+    "prefill_device_ips": ["1.2.3.1", "1.2.3.2"],
     "decode_device_ips": ["1.2.3.9", "1.2.3.10"],
     "llmdatadist_comm_port": 26000,
 }
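For reference, these are the key names the connector now reads (see the last file in this commit) and that the new setup_pd.sh helper emits. A hand-written config using the renamed keys might look like the abridged sketch below; the device IPs and $MODEL_NAME are placeholders, just as in the toy example, and the full key set used in CI appears in setup_pd.sh further down.

# Abridged sketch of a --kv-transfer-config payload with the renamed keys.
# Device IPs and $MODEL_NAME are placeholders; see setup_pd.sh below for the full key set.
KV_CONFIG='{
  "kv_connector": "AscendSimpleConnector",
  "kv_buffer_device": "npu",
  "kv_role": "kv_producer",
  "kv_connector_extra_config": {
    "prefill_device_ips": ["1.2.3.1", "1.2.3.2"],
    "decode_device_ips": ["1.2.3.9", "1.2.3.10"],
    "llmdatadist_comm_port": 26000
  }
}'
vllm serve "$MODEL_NAME" --kv-transfer-config "$KV_CONFIG" --trust-remote-code &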

View File

@@ -181,6 +181,13 @@ async def handle_request():
 if __name__ == "__main__":
-    t = start_service_discovery("0.0.0.0", 30001)
-    app.run(host="0.0.0.0", port=10001)
+    import argparse
+    parser = argparse.ArgumentParser(
+        description="args of disaggregated-prefill proxy")
+    parser.add_argument("--http-port", type=int, default=10001)
+    parser.add_argument("--register-port", type=int, default=10002)
+    args = parser.parse_args()
+    t = start_service_discovery("0.0.0.0", args.register_port)
+    app.run(host="0.0.0.0", port=args.http_port)
     t.join()
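With these flags the proxy no longer binds to fixed ports. The e2e script added later in this commit registers instances on port 10101 and serves client traffic on 10102, so a manual launch matching that setup would be:

# Start the disaggregated-prefill proxy on the ports used by the e2e test
# (10101 for instance registration, 10102 for client HTTP traffic).
python examples/disaggregated_prefill/p2p_disaggrefated_prefill_proxy.py \
    --register-port 10101 \
    --http-port 10102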

View File

@@ -1,3 +1,5 @@
git
vim
wget
jq
curl

View File

@@ -10,3 +10,5 @@ types-jsonschema
 xgrammar
 zmq
 numba
+quart
+types-psutil

View File

@@ -0,0 +1,134 @@
#!/bin/bash
#
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is a part of the vllm-ascend project.
#
function run_prefill_instance() {
local model_name=$1
local tp_size=$2
local prefill_port=$3
local register_port=$4
local prefill_device_ips=$5
local decode_device_ips=$6
echo "================================"
echo "Testing model: $model_name"
echo "================================"
# Start prefill instance
KV_CONFIG=$(jq -n \
--arg kv_connector "AscendSimpleConnector" \
--arg kv_buffer_device "npu" \
--arg kv_role "kv_producer" \
--argjson kv_parallel_size 8 \
--arg kv_port 11001 \
--argjson prefill_device_ips "$prefill_device_ips" \
--argjson decode_device_ips "$decode_device_ips" \
--argjson llmdatadist_comm_port 26000 \
--arg proxy_ip "0.0.0.0" \
--argjson proxy_port "$register_port" \
--argjson http_port "$prefill_port" \
'{
"kv_connector": $kv_connector,
"kv_buffer_device": $kv_buffer_device,
"kv_role": $kv_role,
"kv_parallel_size": $kv_parallel_size,
"kv_port": $kv_port,
"kv_connector_extra_config": {
"prefill_device_ips": $prefill_device_ips,
"decode_device_ips": $decode_device_ips,
"llmdatadist_comm_port": $llmdatadist_comm_port,
"proxy_ip": $proxy_ip,
"proxy_port": $proxy_port,
"http_port": $http_port
}
}')
# start prefill instance
ASCEND_RT_VISIBLE_DEVICES=0 vllm serve $model_name \
--host 0.0.0.0 \
--port $prefill_port \
--tensor-parallel-size $tp_size \
--served-model-name Deepseek \
--max-model-len 2000 \
--trust-remote-code \
--kv-transfer-config "$KV_CONFIG" &
}
function run_decode_instance() {
# Start decode instance
local model_name=$1
local tp_size=$2
local decode_port=$3
local register_port=$4
local prefill_device_ips=$5
local decode_device_ips=$6
KV_CONFIG=$(jq -n \
--arg kv_connector "AscendSimpleConnector" \
--arg kv_buffer_device "npu" \
--arg kv_role "kv_consumer" \
--argjson kv_parallel_size 8 \
--arg kv_port 21001 \
--argjson prefill_device_ips "$prefill_device_ips" \
--argjson decode_device_ips "$decode_device_ips" \
--argjson llmdatadist_comm_port 26000 \
--arg proxy_ip "0.0.0.0" \
--argjson proxy_port "$register_port" \
--argjson http_port "$decode_port" \
'{
"kv_connector": $kv_connector,
"kv_buffer_device": $kv_buffer_device,
"kv_role": $kv_role,
"kv_parallel_size": $kv_parallel_size,
"kv_port": $kv_port,
"kv_connector_extra_config": {
"prefill_device_ips": $prefill_device_ips,
"decode_device_ips": $decode_device_ips,
"llmdatadist_comm_port": $llmdatadist_comm_port,
"proxy_ip": $proxy_ip,
"proxy_port": $proxy_port,
"http_port": $http_port
}
}')
# start decode instance
ASCEND_RT_VISIBLE_DEVICES=1 vllm serve $model_name \
--host 0.0.0.0 \
--port $decode_port \
--tensor-parallel-size $tp_size \
--seed 1024 \
--served-model-name Deepseek \
--max-model-len 2000 \
--max-num-batched-tokens 2000 \
--trust-remote-code \
--gpu-memory-utilization 0.9 \
--kv-transfer-config "$KV_CONFIG" &
}
function run_proxy_server() {
# Build the command for the proxy server with all the hosts and ports
register_port=$1
proxy_port=$2
PROXY_CMD="python examples/disaggregated_prefill/p2p_disaggrefated_prefill_proxy.py --http-port $proxy_port --register-port $register_port"
# Start the proxy server
echo "Starting proxy server with command: $PROXY_CMD"
$PROXY_CMD &
}
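Each helper takes six positional arguments: model name, TP size, instance port, proxy register port, and the prefill/decode device-IP lists as JSON arrays. The new tests/e2e/run_disagg_pd.sh below drives them; a condensed sketch of that call sequence, with the same ports and a placeholder $MODEL, is:

# Condensed sketch mirroring tests/e2e/run_disagg_pd.sh ($MODEL is a placeholder).
. tests/e2e/pd_disaggreate/setup_pd.sh
prefill_ip=$(/usr/local/Ascend/driver/tools/hccn_tool -i 0 -ip -g | grep "ipaddr" | awk -F: '{print $2}' | xargs)
decode_ip=$(/usr/local/Ascend/driver/tools/hccn_tool -i 1 -ip -g | grep "ipaddr" | awk -F: '{print $2}' | xargs)
run_proxy_server 10101 10102    # register port, proxy HTTP port
run_prefill_instance "$MODEL" 1 8001 10101 "[\"$prefill_ip\"]" "[\"$decode_ip\"]"
run_decode_instance "$MODEL" 1 8002 10101 "[\"$prefill_ip\"]" "[\"$decode_ip\"]"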

View File

@@ -0,0 +1,109 @@
#
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is a part of the vllm-ascend project.
#
import os
import signal
import subprocess
import time

import psutil
import requests


def kill_process_and_children(pid):
    try:
        parent = psutil.Process(pid)
        children = parent.children(recursive=True)
        for child in children:
            print(f"Killing child process {child.pid}")
            child.kill()
        print(f"Killing parent process {pid}")
        parent.kill()
    except psutil.NoSuchProcess:
        pass


def kill_all_vllm_related():
    current_pid = os.getpid()
    for proc in psutil.process_iter(['pid', 'cmdline']):
        try:
            if proc.pid == current_pid:
                continue
            cmd = ' '.join(proc.info['cmdline'])
            if "vllm" in cmd or "proxy" in cmd or "engine_worker" in cmd:
                kill_process_and_children(proc.pid)
        except Exception:
            continue


PROXY_PORT = 10102
DECODE_PORT = 8002

SCRIPT_PATH = os.path.abspath("./tests/e2e/run_disagg_pd.sh")


def wait_for_port(port, timeout=30):
    import socket
    start = time.time()
    while time.time() - start < timeout:
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
            if sock.connect_ex(("127.0.0.1", port)) == 0:
                return True
        time.sleep(1)
    raise TimeoutError(f"Port {port} not ready after {timeout}s")


def start_and_test_pipeline():
    print("Launching bash script to run vLLM PD setup...")
    proc = subprocess.Popen(["bash", SCRIPT_PATH])
    try:
        print("Waiting for proxy port to be available...")
        wait_for_port(PROXY_PORT, 180)
        wait_for_port(DECODE_PORT, 600)

        # request
        payload = {
            "model": "Deepseek",
            "prompt": "The future of AI is",
            "max_tokens": 64,
            "temperature": 0,
        }
        response = requests.post(
            f"http://localhost:{PROXY_PORT}/v1/completions",
            headers={"Content-Type": "application/json"},
            json=payload,
            timeout=10)
        assert response.status_code == 200, f"HTTP failed: {response.status_code}"
        result = response.json()
        print("Response:", result)
        assert "text" in result["choices"][0]
        assert len(result["choices"][0]["text"].strip()) > 0
    finally:
        # clean up subprocesses
        print("Cleaning up subprocess...")
        proc.send_signal(signal.SIGINT)
        try:
            proc.wait(timeout=10)
        except subprocess.TimeoutExpired:
            proc.kill()
        kill_all_vllm_related()


def test_disaggregated_pd_pipeline():
    start_and_test_pipeline()
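Outside CI the test can be run directly with pytest, as the workflow step above does. PROXY_PORT (10102) and DECODE_PORT (8002) are hard-coded here, and the underlying script pins the prefill and decode instances to NPU 0 and NPU 1, so an Ascend host with at least two free devices is assumed:

# Local run of the new e2e test from the vllm-ascend repository root
# (assumes an Ascend host with at least two free NPUs and an editable install).
pip install -r requirements-dev.txt
pip install -v -e .
pytest -sv tests/e2e/pd_disaggreate/test_pd_e2e.py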

View File

@@ -0,0 +1,58 @@
#!/bin/bash
#
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is a part of the vllm-ascend project.
#
set -eo errexit
. $(dirname "$0")/common.sh
. $(dirname "$0")/pd_disaggreate/setup_pd.sh
export VLLM_USE_MODELSCOPE="True"
MODEL_NAME="deepseek-ai/DeepSeek-V2-Lite"
# TODO: add tp case
TP_SIZE=1
# TODO: support multi-card
prefill_ip=$(/usr/local/Ascend/driver/tools/hccn_tool -i 0 -ip -g | grep "ipaddr" | awk -F: '{print $2}' | xargs)
PREFILL_DEVICE_IPS="[\"$prefill_ip\"]"
decode_ip=$(/usr/local/Ascend/driver/tools/hccn_tool -i 1 -ip -g | grep "ipaddr" | awk -F: '{print $2}' | xargs)
DECODE_DEVICE_IPS="[\"$decode_ip\"]"
_info "====> Start pd disaggregated test"
REGISTER_PORT=10101
PROXY_PORT=10102
run_proxy_server $REGISTER_PORT $PROXY_PORT
_info "Started pd disaggregated proxy server"
PREFILL_PROC_NAME="Prefill-instance"
PREFILL_PORT=8001
run_prefill_instance $MODEL_NAME $TP_SIZE $PREFILL_PORT $REGISTER_PORT $PREFILL_DEVICE_IPS $DECODE_DEVICE_IPS
_info "Starting prefill instance"
wait_url_ready $PREFILL_PROC_NAME "http://localhost:${PREFILL_PORT}/v1/completions"
DECODE_PROC_NAME="Decode-instance"
DECODE_PORT=8002
run_decode_instance $MODEL_NAME $TP_SIZE $DECODE_PORT $REGISTER_PORT $PREFILL_DEVICE_IPS $DECODE_DEVICE_IPS
_info "Starting decode instance"
wait_url_ready $DECODE_PROC_NAME "http://localhost:${DECODE_PORT}/v1/completions"
_info "pd disaggregated system is ready for handling request"

View File

@@ -61,22 +61,22 @@ class SimplePipe(KVPipeBase):
             raise NotImplementedError(
                 "kv_role should be inside [kv_producer, kv_consumer]")
-        prompt_device_ips = kv_connector_extra_config.get(
-            "prompt_device_ips", None)
+        prefill_device_ips = kv_connector_extra_config.get(
+            "prefill_device_ips", None)
         decode_device_ips = kv_connector_extra_config.get(
             "decode_device_ips", None)
-        if prompt_device_ips is None or decode_device_ips is None:
+        if prefill_device_ips is None or decode_device_ips is None:
             raise ValueError(
-                "Please specify prompt_device_ips and decode_device_ips"
+                "Please specify prefill_device_ips and decode_device_ips"
                 "in kv_transfer_config.kv_connector_extra_config")
-        p_device_num = len(prompt_device_ips)
+        p_device_num = len(prefill_device_ips)
         d_device_num = len(decode_device_ips)
         # When number of devices in P and D is not equal,
         # we assume that device in D can be mapped to any device in P.
         self.p_device_rank = self.rank % p_device_num
         self.d_device_rank = self.rank % d_device_num
-        self.prompt_ip_list = prompt_device_ips
+        self.prompt_ip_list = prefill_device_ips
         self.decode_ip_list = decode_device_ips
         self.llmdatadist_comm_port = kv_connector_extra_config.get(
             "llmdatadist_comm_port", 26000)
@@ -98,7 +98,7 @@ class SimplePipe(KVPipeBase):
         if proxy_ip == "" or proxy_port == "":
             self.proxy_address = ""
         else:
-            self.proxy_address = proxy_ip + ":" + proxy_port
+            self.proxy_address = proxy_ip + ":" + str(proxy_port)
         self._register_thread = None
         if port_offset == 0 and self.proxy_address != "":
@@ -106,7 +106,7 @@ class SimplePipe(KVPipeBase):
             # Note that only NPU 0 of each P/D instance register to proxy.
             if not hostname:
                 hostname = get_ip()  # Get ip of current host.
-            port = kv_transfer_config.kv_port + port_offset
+            port = int(kv_transfer_config.kv_port) + port_offset
             if port == 0:
                 raise ValueError("Port cannot be 0")
             self._hostname = hostname