[CI/UT][PD Disaggreate] Initialize PD Disaggreate UT #889

Merged 3 commits on May 29, 2025

10 changes: 4 additions & 6 deletions .github/workflows/vllm_ascend_test_pd.yaml
@@ -55,12 +55,6 @@ jobs:
options: >-
--device /dev/davinci0
--device /dev/davinci1
--device /dev/davinci2
--device /dev/davinci3
--device /dev/davinci4
--device /dev/davinci5
--device /dev/davinci6
--device /dev/davinci7
--device /dev/davinci_manager
--device /dev/devmm_svm
--device /dev/hisi_hdc
@@ -105,3 +99,7 @@ jobs:
run: |
pip install -r requirements-dev.txt
pip install -v -e .

- name: Run vllm-project/vllm-ascend PD Disaggregation test
run: |
pytest -sv tests/e2e/pd_disaggreate/test_pd_e2e.py
@@ -13,7 +13,7 @@
from multiprocessing import Event, Process

kv_connector_extra_config = {
"prompt_device_ips": ["1.2.3.1", "1.2.3.2"],
"prefill_device_ips": ["1.2.3.1", "1.2.3.2"],
"decode_device_ips": ["1.2.3.9", "1.2.3.10"],
"llmdatadist_comm_port": 26000,
}
@@ -181,6 +181,13 @@ async def handle_request():


if __name__ == "__main__":
    t = start_service_discovery("0.0.0.0", 30001)
    app.run(host="0.0.0.0", port=10001)
    import argparse
    parser = argparse.ArgumentParser(
        description="args of disaggregated-prefill proxy")
    parser.add_argument("--http-port", type=int, default=10001)
    parser.add_argument("--register-port", type=int, default=10002)
    args = parser.parse_args()

    t = start_service_discovery("0.0.0.0", args.register_port)
    app.run(host="0.0.0.0", port=args.http_port)
    t.join()
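With these flags the proxy's HTTP port and service-registration port are configurable rather than hard-coded. A minimal usage sketch, using the ports that tests/e2e/run_disagg_pd.sh passes further below (the argparse defaults remain 10001 and 10002):

    python examples/disaggregated_prefill/p2p_disaggrefated_prefill_proxy.py \
        --http-port 10102 \
        --register-port 10101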
2 changes: 2 additions & 0 deletions packages.txt
@@ -1,3 +1,5 @@
git
vim
wget
jq
curl
2 changes: 2 additions & 0 deletions requirements-dev.txt
@@ -10,3 +10,5 @@ types-jsonschema
xgrammar
zmq
numba
quart
types-psutil
134 changes: 134 additions & 0 deletions tests/e2e/pd_disaggreate/setup_pd.sh
@@ -0,0 +1,134 @@
#!/bin/bash

#
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is a part of the vllm-ascend project.
#

function run_prefill_instance() {
local model_name=$1
local tp_size=$2
local prefill_port=$3
local register_port=$4
local prefill_device_ips=$5
local decode_device_ips=$6

echo "================================"
echo "Testing model: $model_name"
echo "================================"
# Start prefill instance

KV_CONFIG=$(jq -n \
--arg kv_connector "AscendSimpleConnector" \
--arg kv_buffer_device "npu" \
--arg kv_role "kv_producer" \
--argjson kv_parallel_size 8 \
--arg kv_port 11001 \
--argjson prefill_device_ips "$prefill_device_ips" \
--argjson decode_device_ips "$decode_device_ips" \
--argjson llmdatadist_comm_port 26000 \
--arg proxy_ip "0.0.0.0" \
--argjson proxy_port "$register_port" \
--argjson http_port "$prefill_port" \
'{
"kv_connector": $kv_connector,
"kv_buffer_device": $kv_buffer_device,
"kv_role": $kv_role,
"kv_parallel_size": $kv_parallel_size,
"kv_port": $kv_port,
"kv_connector_extra_config": {
"prefill_device_ips": $prefill_device_ips,
"decode_device_ips": $decode_device_ips,
"llmdatadist_comm_port": $llmdatadist_comm_port,
"proxy_ip": $proxy_ip,
"proxy_port": $proxy_port,
"http_port": $http_port
}
}')

# start prefill instance
ASCEND_RT_VISIBLE_DEVICES=0 vllm serve $model_name \
--host 0.0.0.0 \
--port $prefill_port \
--tensor-parallel-size $tp_size \
--served-model-name Deepseek \
--max-model-len 2000 \
--trust-remote-code \
--kv-transfer-config "$KV_CONFIG" &
}



function run_decode_instance() {
# Start decode instance
local model_name=$1
local tp_size=$2
local decode_port=$3
local register_port=$4
local prefill_device_ips=$5
local decode_device_ips=$6

KV_CONFIG=$(jq -n \
--arg kv_connector "AscendSimpleConnector" \
--arg kv_buffer_device "npu" \
--arg kv_role "kv_consumer" \
--argjson kv_parallel_size 8 \
--arg kv_port 21001 \
--argjson prefill_device_ips "$prefill_device_ips" \
--argjson decode_device_ips "$decode_device_ips" \
--argjson llmdatadist_comm_port 26000 \
--arg proxy_ip "0.0.0.0" \
--argjson proxy_port "$register_port" \
--argjson http_port "$decode_port" \
'{
"kv_connector": $kv_connector,
"kv_buffer_device": $kv_buffer_device,
"kv_role": $kv_role,
"kv_parallel_size": $kv_parallel_size,
"kv_port": $kv_port,
"kv_connector_extra_config": {
"prefill_device_ips": $prefill_device_ips,
"decode_device_ips": $decode_device_ips,
"llmdatadist_comm_port": $llmdatadist_comm_port,
"proxy_ip": $proxy_ip,
"proxy_port": $proxy_port,
"http_port": $http_port
}
}')

# start decode instance
ASCEND_RT_VISIBLE_DEVICES=1 vllm serve $model_name \
--host 0.0.0.0 \
--port $decode_port \
--tensor-parallel-size $tp_size \
--seed 1024 \
--served-model-name Deepseek \
--max-model-len 2000 \
--max-num-batched-tokens 2000 \
--trust-remote-code \
--gpu-memory-utilization 0.9 \
--kv-transfer-config "$KV_CONFIG" &
}

function run_proxy_server() {
# Build the command for the proxy server with all the hosts and ports
register_port=$1
proxy_port=$2
PROXY_CMD="python examples/disaggregated_prefill/p2p_disaggrefated_prefill_proxy.py --http-port $proxy_port --register-port $register_port"

# Start the proxy server
echo "Starting proxy server with command: $PROXY_CMD"
$PROXY_CMD &
}
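For orientation, a sketch of what the prefill-side command above expands to once jq renders KV_CONFIG. The model name, ports, and device IPs are illustrative values taken from the sample config and the test script below, not fixed defaults:

    ASCEND_RT_VISIBLE_DEVICES=0 vllm serve deepseek-ai/DeepSeek-V2-Lite \
        --host 0.0.0.0 \
        --port 8001 \
        --tensor-parallel-size 1 \
        --served-model-name Deepseek \
        --max-model-len 2000 \
        --trust-remote-code \
        --kv-transfer-config '{
            "kv_connector": "AscendSimpleConnector",
            "kv_buffer_device": "npu",
            "kv_role": "kv_producer",
            "kv_parallel_size": 8,
            "kv_port": "11001",
            "kv_connector_extra_config": {
                "prefill_device_ips": ["1.2.3.1"],
                "decode_device_ips": ["1.2.3.9"],
                "llmdatadist_comm_port": 26000,
                "proxy_ip": "0.0.0.0",
                "proxy_port": 10101,
                "http_port": 8001
            }
        }'

Note that kv_port is passed to jq with --arg and therefore renders as a string, while the numeric fields use --argjson and render as JSON numbers.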
109 changes: 109 additions & 0 deletions tests/e2e/pd_disaggreate/test_pd_e2e.py
@@ -0,0 +1,109 @@
#
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is a part of the vllm-ascend project.
#

import os
import signal
import subprocess
import time

import psutil
import requests


def kill_process_and_children(pid):
    try:
        parent = psutil.Process(pid)
        children = parent.children(recursive=True)
        for child in children:
            print(f"Killing child process {child.pid}")
            child.kill()
        print(f"Killing parent process {pid}")
        parent.kill()
    except psutil.NoSuchProcess:
        pass


def kill_all_vllm_related():
    current_pid = os.getpid()

    for proc in psutil.process_iter(['pid', 'cmdline']):
        try:
            if proc.pid == current_pid:
                continue
            cmd = ' '.join(proc.info['cmdline'])
            if "vllm" in cmd or "proxy" in cmd or "engine_worker" in cmd:
                kill_process_and_children(proc.pid)
        except Exception:
            continue


PROXY_PORT = 10102
DECODE_PORT = 8002

SCRIPT_PATH = os.path.abspath("./tests/e2e/run_disagg_pd.sh")


def wait_for_port(port, timeout=30):
    import socket
    start = time.time()
    while time.time() - start < timeout:
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
            if sock.connect_ex(("127.0.0.1", port)) == 0:
                return True
        time.sleep(1)
    raise TimeoutError(f"Port {port} not ready after {timeout}s")


def start_and_test_pipeline():
print("Launching bash script to run vLLM PD setup...")
proc = subprocess.Popen(["bash", SCRIPT_PATH])
try:
print("Waiting for proxy port to be available...")
wait_for_port(PROXY_PORT, 180)
wait_for_port(DECODE_PORT, 600)

# request
payload = {
"model": "Deepseek",
"prompt": "The future of AI is",
"max_tokens": 64,
"temperature": 0,
}
response = requests.post(
f"http://localhost:{PROXY_PORT}/v1/completions",
headers={"Content-Type": "application/json"},
json=payload,
timeout=10)
assert response.status_code == 200, f"HTTP failed: {response.status_code}"
result = response.json()
print("Response:", result)
assert "text" in result["choices"][0]
assert len(result["choices"][0]["text"].strip()) > 0

finally:
# clean up subprocesses
print("Cleaning up subprocess...")
proc.send_signal(signal.SIGINT)
try:
proc.wait(timeout=10)
except subprocess.TimeoutExpired:
proc.kill()
kill_all_vllm_related()


def test_disaggregated_pd_pipeline():
    start_and_test_pipeline()
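For manual debugging, the completion request the test sends through the proxy is equivalent to the following curl sketch (proxy port 10102, as set by PROXY_PORT above):

    curl -s http://localhost:10102/v1/completions \
        -H "Content-Type: application/json" \
        -d '{
            "model": "Deepseek",
            "prompt": "The future of AI is",
            "max_tokens": 64,
            "temperature": 0
        }'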
58 changes: 58 additions & 0 deletions tests/e2e/run_disagg_pd.sh
@@ -0,0 +1,58 @@
#!/bin/bash

#
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is a part of the vllm-ascend project.
#

set -eo errexit

. $(dirname "$0")/common.sh
. $(dirname "$0")/pd_disaggreate/setup_pd.sh

export VLLM_USE_MODELSCOPE="True"

MODEL_NAME="deepseek-ai/DeepSeek-V2-Lite"
# TODO: add tp case
TP_SIZE=1

# TODO: support multi-card
prefill_ip=$(/usr/local/Ascend/driver/tools/hccn_tool -i 0 -ip -g | grep "ipaddr" | awk -F: '{print $2}' | xargs)
PREFILL_DEVICE_IPS="[\"$prefill_ip\"]"

decode_ip=$(/usr/local/Ascend/driver/tools/hccn_tool -i 1 -ip -g | grep "ipaddr" | awk -F: '{print $2}' | xargs)
DECODE_DEVICE_IPS="[\"$decode_ip\"]"

_info "====> Start pd disaggregated test"
REGISTER_PORT=10101
PROXY_PORT=10102
run_proxy_server $REGISTER_PORT $PROXY_PORT
_info "Started pd disaggregated proxy server"

PREFILL_PROC_NAME="Prefill-instance"
PREFILL_PORT=8001
run_prefill_instance $MODEL_NAME $TP_SIZE $PREFILL_PORT $REGISTER_PORT $PREFILL_DEVICE_IPS $DECODE_DEVICE_IPS
_info "Starting prefill instance"

wait_url_ready $PREFILL_PROC_NAME "http://localhost:${PREFILL_PORT}/v1/completions"

DECODE_PROC_NAME="Decode-instance"
DECODE_PORT=8002
run_decode_instance $MODEL_NAME $TP_SIZE $DECODE_PORT $REGISTER_PORT $PREFILL_DEVICE_IPS $DECODE_DEVICE_IPS
_info "Starting decode instance"

wait_url_ready $DECODE_PROC_NAME "http://localhost:${DECODE_PORT}/v1/completions"

_info "pd disaggregated system is ready for handling request"