#!/bin/bash


check_npus() {
    # shellcheck disable=SC2155
    declare -g npu_count=$(npu-smi info -l | grep "Total Count" | awk -F ':' '{print $2}' | tr -d ' ')

    if [[ -z "$npu_count" || "$npu_count" -eq 0 ]]; then
        echo "Need at least 1 NPU to run benchmarking."
        exit 1
    else
        echo "Found NPU count: $npu_count"
    fi

    npu_type=$(npu-smi info | grep -E "^\| [0-9]+" | awk -F '|' '{print $2}' | awk '{$1=$1;print}' | awk '{print $2}' | head -n 1)

    echo "NPU type is: $npu_type"
}
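
# NOTE: the parsing in check_npus assumes `npu-smi info -l` prints a line such
# as "Total Count : 8" plus one "NPU ID : ..." line per device, and that
# `npu-smi info` renders one table row per device ("| 0  910B1 ... |"); the
# exact layout may differ across driver versions.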

ensure_sharegpt_downloaded() {
    local FILE=ShareGPT_V3_unfiltered_cleaned_split.json
    if [ ! -f "$FILE" ]; then
        echo "$FILE not found, downloading from hf-mirror ..."
        wget https://hf-mirror.com/datasets/anon8231489123/ShareGPT_Vicuna_unfiltered/resolve/main/$FILE
    else
        echo "$FILE already exists."
    fi
}
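
# The serving test cases are expected to reference this dataset themselves
# (e.g. via a dataset-path entry in their client_parameters); nothing in this
# script passes it to the benchmark explicitly.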

json2args() {
    # transforms the JSON string to command line args, with '_' replaced by '-'
    # example:
    # input: { "model": "meta-llama/Llama-2-7b-chat-hf", "tensor_parallel_size": 1 }
    # output: --model meta-llama/Llama-2-7b-chat-hf --tensor-parallel-size 1
    local json_string=$1
    local args
    args=$(
        echo "$json_string" | jq -r '
            to_entries |
            map("--" + (.key | gsub("_"; "-")) + " " + (.value | tostring)) |
            join(" ")
        '
    )
    echo "$args"
}
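
# Note: jq's `tostring` flattens whatever the value is, so flat scalar
# parameters are assumed here; nested objects or arrays would come out as raw
# JSON text rather than usable CLI arguments.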

wait_for_server() {
    # wait for the vllm server to start
    # returns 0 once the endpoint is reachable, 1 if it is still unreachable
    # after the 1200s timeout
    timeout 1200 bash -c '
        until curl -X POST localhost:8000/v1/completions; do
            sleep 1
        done' && return 0 || return 1
}
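
# curl exits nonzero while the connection is refused and zero as soon as the
# port accepts requests (even if the empty POST draws an HTTP error), so the
# loop above ends when the server is reachable, not when it would return 200.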

get_cur_npu_id() {
    # print the ID of the first listed NPU; `$2+0` coerces the field to a
    # number, which also strips the surrounding whitespace
    npu-smi info -l | awk -F ':' '/NPU ID/ {print $2+0; exit}'
}

kill_npu_processes() {
    # dump the process table for debugging, then free port 8000 and stop any
    # leftover python3 workers; also clear vllm's per-user config directory
    ps aux
    lsof -t -i:8000 | xargs -r kill -9
    pgrep python3 | xargs -r kill -9

    sleep 4
    rm -rf ~/.config/vllm
}

run_latency_tests() {
    # run latency tests using `benchmark_latency.py`
    # $1: a json file specifying latency test cases
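    #
    # A test-file entry is expected to look like (names are illustrative):
    #   { "test_name": "latency_llama8B_tp1",
    #     "parameters": { "model": "<model-id>", "num_iters": 15 } }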

    local latency_test_file
    latency_test_file=$1

    # Iterate over latency tests
    jq -c '.[]' "$latency_test_file" | while read -r params; do
        # get the test name
        test_name=$(echo "$params" | jq -r '.test_name')
        if [[ ! "$test_name" =~ ^latency_ ]]; then
            echo "In latency-tests.json, test_name must start with \"latency_\"."
            exit 1
        fi

        # if TEST_SELECTOR is set, only run the test cases that match the selector
        if [[ -n "$TEST_SELECTOR" ]] && [[ ! "$test_name" =~ $TEST_SELECTOR ]]; then
            echo "Skip test case $test_name."
            continue
        fi

        # get arguments
        latency_params=$(echo "$params" | jq -r '.parameters')
        latency_args=$(json2args "$latency_params")

        latency_command="python3 vllm_benchmarks/benchmark_latency.py \
            --output-json $RESULTS_FOLDER/${test_name}.json \
            $latency_args"

        echo "Running test case $test_name"
        echo "Latency command: $latency_command"

        # run the benchmark
        eval "$latency_command"

        kill_npu_processes
    done
}

run_throughput_tests() {
    # run throughput tests using `benchmark_throughput.py`
    # $1: a json file specifying throughput test cases
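    #
    # Entries follow the same shape as latency-tests.json, with test_name
    # prefixed "throughput_" instead of "latency_".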

    local throughput_test_file
    throughput_test_file=$1

    # Iterate over throughput tests
    jq -c '.[]' "$throughput_test_file" | while read -r params; do
        # get the test name
        test_name=$(echo "$params" | jq -r '.test_name')
        if [[ ! "$test_name" =~ ^throughput_ ]]; then
            echo "In throughput-tests.json, test_name must start with \"throughput_\"."
            exit 1
        fi

        # if TEST_SELECTOR is set, only run the test cases that match the selector
        if [[ -n "$TEST_SELECTOR" ]] && [[ ! "$test_name" =~ $TEST_SELECTOR ]]; then
            echo "Skip test case $test_name."
            continue
        fi

        # get arguments
        throughput_params=$(echo "$params" | jq -r '.parameters')
        throughput_args=$(json2args "$throughput_params")

        throughput_command="python3 vllm_benchmarks/benchmark_throughput.py \
            --output-json $RESULTS_FOLDER/${test_name}.json \
            $throughput_args"

        echo "Running test case $test_name"
        echo "Throughput command: $throughput_command"

        # run the benchmark
        eval "$throughput_command"

        kill_npu_processes
    done
}

run_serving_tests() {
    # run serving tests using `benchmark_serving.py`
    # $1: a json file specifying serving test cases
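    #
    # A test-file entry is expected to look like (names are illustrative):
    #   { "test_name": "serving_llama8B_tp1",
    #     "qps_list": [1, 4, "inf"],
    #     "server_parameters": { "model": "<model-id>", ... },
    #     "client_parameters": { "model": "<model-id>", ... } }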

    local serving_test_file
    serving_test_file=$1

    # Iterate over serving tests
    jq -c '.[]' "$serving_test_file" | while read -r params; do
        # get the test name
        test_name=$(echo "$params" | jq -r '.test_name')
        if [[ ! "$test_name" =~ ^serving_ ]]; then
            echo "In serving-tests.json, test_name must start with \"serving_\"."
            exit 1
        fi

        # if TEST_SELECTOR is set, only run the test cases that match the selector
        if [[ -n "$TEST_SELECTOR" ]] && [[ ! "$test_name" =~ $TEST_SELECTOR ]]; then
            echo "Skip test case $test_name."
            continue
        fi

        # get client and server arguments
        server_params=$(echo "$params" | jq -r '.server_parameters')
        client_params=$(echo "$params" | jq -r '.client_parameters')
        server_args=$(json2args "$server_params")
        client_args=$(json2args "$client_params")
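
        # qps_list is a JSON array such as [1, 4, "inf"]; `@sh` below turns it
        # into shell words, quoting string entries, so "inf" arrives as 'inf'
        # with literal quotes (normalized in the loop further down)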
        qps_list=$(echo "$params" | jq -r '.qps_list')
        qps_list=$(echo "$qps_list" | jq -r '.[] | @sh')
        echo "Running over qps list $qps_list"

        # check if the server model and the client model are aligned
        server_model=$(echo "$server_params" | jq -r '.model')
        client_model=$(echo "$client_params" | jq -r '.model')
        if [[ $server_model != "$client_model" ]]; then
            echo "Server model and client model must be the same. Skipping test case $test_name."
            continue
        fi

        server_command="python3 \
            -m vllm.entrypoints.openai.api_server \
            $server_args"

        # run the server
        echo "Running test case $test_name"
        echo "Server command: $server_command"
        bash -c "$server_command" &
        server_pid=$!
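
        # the captured PID is killed directly after the qps sweep below;
        # kill_npu_processes then sweeps any worker processes that outlive it
        # (e.g. tensor-parallel children)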

        # wait until the server is alive
        if wait_for_server; then
            echo ""
            echo "vllm server is up and running."
        else
            echo ""
            echo "vllm failed to start within the timeout period."
        fi

        # iterate over different QPS
        for qps in $qps_list; do
            # normalize 'inf' (shell-quoted by @sh) to a bare inf
            if [[ "$qps" == *"inf"* ]]; then
                echo "qps was $qps"
                qps="inf"
                echo "now qps is $qps"
            fi

            new_test_name="${test_name}_qps_${qps}"

            client_command="python3 vllm_benchmarks/benchmark_serving.py \
                --save-result \
                --result-dir $RESULTS_FOLDER \
                --result-filename ${new_test_name}.json \
                --request-rate $qps \
                $client_args"

            echo "Running test case $test_name with qps $qps"
            echo "Client command: $client_command"

            bash -c "$client_command"
        done

        # clean up
        kill -9 "$server_pid"
        kill_npu_processes
    done
}

cleanup() {
    rm -rf ./vllm_benchmarks
}

get_benchmarks_scripts() {
    # grab the upstream vllm benchmark scripts via a shallow clone; HTTPS is
    # used so no SSH key setup is required
    git clone -b main --depth=1 https://github.com/vllm-project/vllm.git && \
        mv vllm/benchmarks vllm_benchmarks
    rm -rf ./vllm
}

main() {
    START_TIME=$(date +%s)
    check_npus

    # dependencies
    (which wget && which curl) || (apt-get update && apt-get install -y wget curl)
    (which jq) || (apt-get update && apt-get install -y jq)
    (which lsof) || (apt-get update && apt-get install -y lsof)

    # get the current IP address, required by benchmark_serving.py
    # shellcheck disable=SC2155
    export VLLM_HOST_IP=$(hostname -I | awk '{print $1}')
    # turn off per-request status reporting to keep the terminal output clean
    export VLLM_LOG_LEVEL="WARNING"

    # set env
    export VLLM_USE_MODELSCOPE="True"
    export HF_ENDPOINT="https://hf-mirror.com"

    # prepare for benchmarking
    cd benchmarks || exit 1
    get_benchmarks_scripts
    trap cleanup EXIT

    QUICK_BENCHMARK_ROOT=./

    declare -g RESULTS_FOLDER=results
    mkdir -p "$RESULTS_FOLDER"

    ensure_sharegpt_downloaded

    # benchmarks
    run_serving_tests "$QUICK_BENCHMARK_ROOT/tests/serving-tests.json"
    run_latency_tests "$QUICK_BENCHMARK_ROOT/tests/latency-tests.json"
    run_throughput_tests "$QUICK_BENCHMARK_ROOT/tests/throughput-tests.json"

    END_TIME=$(date +%s)
    ELAPSED_TIME=$((END_TIME - START_TIME))
    echo "Total execution time: $ELAPSED_TIME seconds"
}

main "$@"