From 433783c487568d22aa7094e08310f8c2efd6019c Mon Sep 17 00:00:00 2001 From: letonghan Date: Mon, 24 Mar 2025 11:43:38 +0800 Subject: [PATCH 01/12] refactor files of tgi on xeon Signed-off-by: letonghan --- .../intel/cpu/xeon/compose_tgi.yaml | 97 +++++ DocSum/docker_compose/set_env.sh | 19 +- DocSum/tests/test_compose_tgi_on_xeon.sh | 381 ++++++++++++++++++ 3 files changed, 489 insertions(+), 8 deletions(-) create mode 100644 DocSum/docker_compose/intel/cpu/xeon/compose_tgi.yaml create mode 100644 DocSum/tests/test_compose_tgi_on_xeon.sh diff --git a/DocSum/docker_compose/intel/cpu/xeon/compose_tgi.yaml b/DocSum/docker_compose/intel/cpu/xeon/compose_tgi.yaml new file mode 100644 index 0000000000..7499b97501 --- /dev/null +++ b/DocSum/docker_compose/intel/cpu/xeon/compose_tgi.yaml @@ -0,0 +1,97 @@ +# Copyright (C) 2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +services: + tgi-server: + image: ghcr.io/huggingface/text-generation-inference:2.4.0-intel-cpu + container_name: docsum-xeon-tgi-server + ports: + - ${LLM_ENDPOINT_PORT:-8008}:80 + volumes: + - "${MODEL_CACHE:-./data}:/data" + environment: + no_proxy: ${no_proxy} + http_proxy: ${http_proxy} + https_proxy: ${https_proxy} + TGI_LLM_ENDPOINT: ${TGI_LLM_ENDPOINT} + HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN} + host_ip: ${host_ip} + healthcheck: + test: ["CMD-SHELL", "curl -f http://${host_ip}:${LLM_ENDPOINT_PORT}/health || exit 1"] + interval: 10s + timeout: 10s + retries: 100 + shm_size: 1g + command: --model-id ${LLM_MODEL_ID} --cuda-graphs 0 --max-input-length ${MAX_INPUT_TOKENS} --max-total-tokens ${MAX_TOTAL_TOKENS} + + llm-docsum-tgi: + image: ${REGISTRY:-opea}/llm-docsum:${TAG:-latest} + container_name: docsum-xeon-llm-server + depends_on: + tgi-server: + condition: service_healthy + ports: + - ${LLM_PORT:-9000}:9000 + ipc: host + environment: + no_proxy: ${no_proxy} + http_proxy: ${http_proxy} + https_proxy: ${https_proxy} + LLM_ENDPOINT: ${LLM_ENDPOINT} + LLM_MODEL_ID: ${LLM_MODEL_ID} + HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN} + MAX_INPUT_TOKENS: ${MAX_INPUT_TOKENS} + MAX_TOTAL_TOKENS: ${MAX_TOTAL_TOKENS} + DocSum_COMPONENT_NAME: ${DocSum_COMPONENT_NAME} + LOGFLAG: ${LOGFLAG:-False} + restart: unless-stopped + + whisper: + image: ${REGISTRY:-opea}/whisper:${TAG:-latest} + container_name: docsum-xeon-whisper-server + ports: + - "7066:7066" + ipc: host + environment: + no_proxy: ${no_proxy} + http_proxy: ${http_proxy} + https_proxy: ${https_proxy} + restart: unless-stopped + + docsum-xeon-backend-server: + image: ${REGISTRY:-opea}/docsum:${TAG:-latest} + container_name: docsum-xeon-backend-server + depends_on: + - tgi-server + - llm-docsum-tgi + ports: + - "${BACKEND_SERVICE_PORT:-8888}:8888" + environment: + - no_proxy=${no_proxy} + - https_proxy=${https_proxy} + - http_proxy=${http_proxy} + - MEGA_SERVICE_HOST_IP=${MEGA_SERVICE_HOST_IP} + - LLM_SERVICE_HOST_IP=${LLM_SERVICE_HOST_IP} + - ASR_SERVICE_HOST_IP=${ASR_SERVICE_HOST_IP} + ipc: host + restart: always + + docsum-gradio-ui: + image: ${REGISTRY:-opea}/docsum-gradio-ui:${TAG:-latest} + container_name: docsum-xeon-ui-server + depends_on: + - docsum-xeon-backend-server + ports: + - "${FRONTEND_SERVICE_PORT:-5173}:5173" + environment: + - no_proxy=${no_proxy} + - https_proxy=${https_proxy} + - http_proxy=${http_proxy} + - BACKEND_SERVICE_ENDPOINT=${BACKEND_SERVICE_ENDPOINT} + - DOC_BASE_URL=${BACKEND_SERVICE_ENDPOINT} + ipc: host + restart: always + +networks: + default: + driver: bridge diff --git a/DocSum/docker_compose/set_env.sh 
b/DocSum/docker_compose/set_env.sh index f116a99c3a..25afc30b3a 100644 --- a/DocSum/docker_compose/set_env.sh +++ b/DocSum/docker_compose/set_env.sh @@ -6,18 +6,21 @@ pushd "../../" > /dev/null source .set_env.sh popd > /dev/null + +export no_proxy="${no_proxy},${host_ip}" + +export LLM_ENDPOINT_PORT=8008 +export LLM_MODEL_ID="Intel/neural-chat-7b-v3-3" export MAX_INPUT_TOKENS=1024 export MAX_TOTAL_TOKENS=2048 -export no_proxy="${no_proxy},${host_ip}" +export LLM_PORT=9000 +export LLM_ENDPOINT="http://${host_ip}:${LLM_ENDPOINT_PORT}" +export DocSum_COMPONENT_NAME="OpeaDocSumTgi" + export MEGA_SERVICE_HOST_IP=${host_ip} export LLM_SERVICE_HOST_IP=${host_ip} export ASR_SERVICE_HOST_IP=${host_ip} -export LLM_MODEL_ID="Intel/neural-chat-7b-v3-3" - -export BACKEND_SERVICE_ENDPOINT="http://${host_ip}:8888/v1/docsum" -export LLM_ENDPOINT_PORT=8008 -export DOCSUM_PORT=9000 -export LLM_ENDPOINT="http://${host_ip}:${LLM_ENDPOINT_PORT}" -export DocSum_COMPONENT_NAME="OpeaDocSumTgi" +export BACKEND_SERVICE_PORT=8888 +export BACKEND_SERVICE_ENDPOINT="http://${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" diff --git a/DocSum/tests/test_compose_tgi_on_xeon.sh b/DocSum/tests/test_compose_tgi_on_xeon.sh new file mode 100644 index 0000000000..90e63a1f0d --- /dev/null +++ b/DocSum/tests/test_compose_tgi_on_xeon.sh @@ -0,0 +1,381 @@ +#!/bin/bash +# Copyright (C) 2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +set -xe + +IMAGE_REPO=${IMAGE_REPO:-"opea"} +IMAGE_TAG=${IMAGE_TAG:-"latest"} +export http_proxy=$http_proxy +export https_proxy=$https_proxy +export host_ip=$(hostname -I | awk '{print $1}') + +echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}" +echo "TAG=IMAGE_TAG=${IMAGE_TAG}" +export no_proxy="${no_proxy},${host_ip}" +export MODEL_CACHE=${model_cache:-"./data"} +export REGISTRY=${IMAGE_REPO} +export TAG=${IMAGE_TAG} +export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN} +export LLM_ENDPOINT_PORT=8008 +export LLM_MODEL_ID="Intel/neural-chat-7b-v3-3" +export MAX_INPUT_TOKENS=2048 +export MAX_TOTAL_TOKENS=4096 +export LLM_PORT=9000 +export LLM_ENDPOINT="http://${host_ip}:${LLM_ENDPOINT_PORT}" +export DocSum_COMPONENT_NAME="OpeaDocSumTgi" +export MEGA_SERVICE_HOST_IP=${host_ip} +export LLM_SERVICE_HOST_IP=${host_ip} +export ASR_SERVICE_HOST_IP=${host_ip} +export BACKEND_SERVICE_PORT=8888 +export BACKEND_SERVICE_ENDPOINT="http://${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" +export LOGFLAG=True + +WORKPATH=$(dirname "$PWD") +LOG_PATH="$WORKPATH/tests" + +# Get the root folder of the current script +ROOT_FOLDER=$(dirname "$(readlink -f "$0")") + +function build_docker_images() { + opea_branch=${opea_branch:-"main"} + cd $WORKPATH/docker_image_build + git clone --depth 1 --branch ${opea_branch} https://github.com/opea-project/GenAIComps.git + pushd GenAIComps + docker build --no-cache -t ${REGISTRY}/comps-base:${TAG} --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile . + popd && sleep 1s + + echo "Build all the images with --no-cache, check docker_image_build.log for details..." 
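+    # Each name in service_list below must correspond to a service entry in docker_image_build/build.yaml.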
+ service_list="docsum docsum-gradio-ui whisper llm-docsum" + docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log + + docker pull ghcr.io/huggingface/text-generation-inference:1.4 + docker images && sleep 1s +} + +function start_services() { + cd $WORKPATH/docker_compose/intel/cpu/xeon/ + docker compose -f compose_tgi.yaml up -d > ${LOG_PATH}/start_services_with_compose.log + sleep 1m +} + +get_base64_str() { + local file_name=$1 + base64 -w 0 "$file_name" +} + +# Function to generate input data for testing based on the document type +input_data_for_test() { + local document_type=$1 + case $document_type in + ("text") + echo "THIS IS A TEST >>>> and a number of states are starting to adopt them voluntarily special correspondent john delenco of education week reports it takes just 10 minutes to cross through gillette wyoming this small city sits in the northeast corner of the state surrounded by 100s of miles of prairie but schools here in campbell county are on the edge of something big the next generation science standards you are going to build a strand of dna and you are going to decode it and figure out what that dna actually says for christy mathis at sage valley junior high school the new standards are about learning to think like a scientist there is a lot of really good stuff in them every standard is a performance task it is not you know the child needs to memorize these things it is the student needs to be able to do some pretty intense stuff we are analyzing we are critiquing we are." + ;; + ("audio") + get_base64_str "$ROOT_FOLDER/data/test.wav" + ;; + ("video") + get_base64_str "$ROOT_FOLDER/data/test.mp4" + ;; + (*) + echo "Invalid document type" >&2 + exit 1 + ;; + esac +} + +function validate_services_json() { + local URL="$1" + local EXPECTED_RESULT="$2" + local SERVICE_NAME="$3" + local DOCKER_NAME="$4" + local INPUT_DATA="$5" + + local HTTP_STATUS=$(curl -s -o /dev/null -w "%{http_code}" -X POST -d "$INPUT_DATA" -H 'Content-Type: application/json' "$URL") + + echo "===========================================" + + if [ "$HTTP_STATUS" -eq 200 ]; then + echo "[ $SERVICE_NAME ] HTTP status is 200. Checking content..." + + local CONTENT=$(curl -s -X POST -d "$INPUT_DATA" -H 'Content-Type: application/json' "$URL" | tee ${LOG_PATH}/${SERVICE_NAME}.log) + + if echo "$CONTENT" | grep -q "$EXPECTED_RESULT"; then + echo "[ $SERVICE_NAME ] Content is as expected." + else + echo "EXPECTED_RESULT==> $EXPECTED_RESULT" + echo "CONTENT==> $CONTENT" + echo "[ $SERVICE_NAME ] Content does not match the expected result: $CONTENT" + docker logs ${DOCKER_NAME} >> ${LOG_PATH}/${SERVICE_NAME}.log + exit 1 + + fi + else + echo "[ $SERVICE_NAME ] HTTP status is not 200. Received status was $HTTP_STATUS" + docker logs ${DOCKER_NAME} >> ${LOG_PATH}/${SERVICE_NAME}.log + exit 1 + fi + sleep 1s +} + +function validate_services_form() { + local URL="$1" + local EXPECTED_RESULT="$2" + local SERVICE_NAME="$3" + local DOCKER_NAME="$4" + local FORM_DATA1="$5" + local FORM_DATA2="$6" + local FORM_DATA3="$7" + local FORM_DATA4="$8" + local FORM_DATA5="$9" + + local HTTP_STATUS=$(curl -s -o /dev/null -w "%{http_code}" -X POST -F "$FORM_DATA1" -F "$FORM_DATA2" -F "$FORM_DATA3" -F "$FORM_DATA4" -F "$FORM_DATA5" -H 'Content-Type: multipart/form-data' "$URL") + + if [ "$HTTP_STATUS" -eq 200 ]; then + echo "[ $SERVICE_NAME ] HTTP status is 200. Checking content..." 
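+        # Note: the request is sent a second time below to capture the response body;
+        # the curl above used -o /dev/null and only recorded the HTTP status code.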
+ + local CONTENT=$(curl -s -X POST -F "$FORM_DATA1" -F "$FORM_DATA2" -F "$FORM_DATA3" -F "$FORM_DATA4" -F "$FORM_DATA5" -H 'Content-Type: multipart/form-data' "$URL" | tee ${LOG_PATH}/${SERVICE_NAME}.log) + + if echo "$CONTENT" | grep -q "$EXPECTED_RESULT"; then + echo "[ $SERVICE_NAME ] Content is as expected." + else + echo "[ $SERVICE_NAME ] Content does not match the expected result: $CONTENT" + docker logs ${DOCKER_NAME} >> ${LOG_PATH}/${SERVICE_NAME}.log + exit 1 + fi + else + echo "[ $SERVICE_NAME ] HTTP status is not 200. Received status was $HTTP_STATUS" + docker logs ${DOCKER_NAME} >> ${LOG_PATH}/${SERVICE_NAME}.log + exit 1 + fi + sleep 1s +} + +function validate_microservices() { + # Check if the microservices are running correctly. + + # tgi for llm service + validate_services_json \ + "${host_ip}:${LLM_ENDPOINT_PORT}/generate" \ + "generated_text" \ + "tgi-server" \ + "tgi-server" \ + '{"inputs":"What is Deep Learning?","parameters":{"max_new_tokens":17, "do_sample": true}}' + + # llm microservice + validate_services_json \ + "${host_ip}:${LLM_PORT}/v1/docsum" \ + "text" \ + "llm-docsum-tgi" \ + "llm-docsum-server" \ + '{"messages":"Text Embeddings Inference (TEI) is a toolkit for deploying and serving open source text embeddings and sequence classification models. TEI enables high-performance extraction for the most popular models, including FlagEmbedding, Ember, GTE and E5."}' + + # whisper microservice + ulimit -s 65536 + validate_services_json \ + "${host_ip}:7066/v1/asr" \ + '{"asr_result":"well"}' \ + "whisper" \ + "whisper-server" \ + "{\"audio\": \"$(input_data_for_test "audio")\"}" + +} + +function validate_megaservice_text() { + echo ">>> Checking text data in json format" + validate_services_json \ + "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \ + "[DONE]" \ + "docsum-xeon-backend-server" \ + "docsum-xeon-backend-server" \ + '{"type": "text", "messages": "Text Embeddings Inference (TEI) is a toolkit for deploying and serving open source text embeddings and sequence classification models. TEI enables high-performance extraction for the most popular models, including FlagEmbedding, Ember, GTE and E5."}' + + echo ">>> Checking text data in form format, set language=en" + validate_services_form \ + "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \ + "[DONE]" \ + "docsum-xeon-backend-server" \ + "docsum-xeon-backend-server" \ + "type=text" \ + "messages=Text Embeddings Inference (TEI) is a toolkit for deploying and serving open source text embeddings and sequence classification models. TEI enables high-performance extraction for the most popular models, including FlagEmbedding, Ember, GTE and E5." 
\ + "max_tokens=32" \ + "language=en" \ + "stream=True" + + echo ">>> Checking text data in form format, set language=zh" + validate_services_form \ + "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \ + "[DONE]" \ + "docsum-xeon-backend-server" \ + "docsum-xeon-backend-server" \ + "type=text" \ + "messages=2024年9月26日,北京——今日,英特尔正式发布英特尔® 至强® 6性能核处理器(代号Granite Rapids),为AI、数据分析、科学计算等计算密集型业务提供卓越性能。" \ + "max_tokens=32" \ + "language=zh" \ + "stream=True" + + echo ">>> Checking text data in form format, upload file" + validate_services_form \ + "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \ + "[DONE]" \ + "docsum-xeon-backend-server" \ + "docsum-xeon-backend-server" \ + "type=text" \ + "messages=" \ + "files=@$ROOT_FOLDER/data/short.txt" \ + "max_tokens=32" \ + "language=en" +} + +function validate_megaservice_multimedia() { + echo ">>> Checking audio data in json format" + validate_services_json \ + "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \ + "[DONE]" \ + "docsum-xeon-backend-server" \ + "docsum-xeon-backend-server" \ + "{\"type\": \"audio\", \"messages\": \"$(input_data_for_test "audio")\"}" + + echo ">>> Checking audio data in form format" + validate_services_form \ + "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \ + "[DONE]" \ + "docsum-xeon-backend-server" \ + "docsum-xeon-backend-server" \ + "type=audio" \ + "messages=UklGRigAAABXQVZFZm10IBIAAAABAAEARKwAAIhYAQACABAAAABkYXRhAgAAAAEA" \ + "max_tokens=32" \ + "language=en" \ + "stream=True" + + echo ">>> Checking video data in json format" + validate_services_json \ + "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \ + "[DONE]" \ + "docsum-xeon-backend-server" \ + "docsum-xeon-backend-server" \ + "{\"type\": \"video\", \"messages\": \"$(input_data_for_test "video")\"}" + + echo ">>> Checking video data in form format" + validate_services_form \ + "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \ + "[DONE]" \ + "docsum-xeon-backend-server" \ + "docsum-xeon-backend-server" \ + "type=video" \ + "messages=\"$(input_data_for_test "video")\"" \ + "max_tokens=32" \ + "language=en" \ + "stream=True" +} + +function validate_megaservice_long_text() { + echo ">>> Checking long text data in form format, set summary_type=auto" + validate_services_form \ + "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \ + "[DONE]" \ + "docsum-xeon-backend-server" \ + "docsum-xeon-backend-server" \ + "type=text" \ + "messages=" \ + "files=@$ROOT_FOLDER/data/long.txt" \ + "max_tokens=128" \ + "summary_type=auto" + + echo ">>> Checking long text data in form format, set summary_type=stuff" + validate_services_form \ + "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \ + "[DONE]" \ + "docsum-xeon-backend-server" \ + "docsum-xeon-backend-server" \ + "type=text" \ + "messages=" \ + "files=@$ROOT_FOLDER/data/long.txt" \ + "max_tokens=128" \ + "summary_type=stuff" + + echo ">>> Checking long text data in form format, set summary_type=truncate" + validate_services_form \ + "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \ + "[DONE]" \ + "docsum-xeon-backend-server" \ + "docsum-xeon-backend-server" \ + "type=text" \ + "messages=" \ + "files=@$ROOT_FOLDER/data/long.txt" \ + "max_tokens=128" \ + "summary_type=truncate" + + echo ">>> Checking long text data in form format, set summary_type=map_reduce" + validate_services_form \ + "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \ + "[DONE]" \ + "docsum-xeon-backend-server" \ + "docsum-xeon-backend-server" \ + "type=text" \ + "messages=" \ + "files=@$ROOT_FOLDER/data/long.txt" \ + "max_tokens=128" \ + "summary_type=map_reduce" + + 
echo ">>> Checking long text data in form format, set summary_type=refine" + validate_services_form \ + "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \ + "[DONE]" \ + "docsum-xeon-backend-server" \ + "docsum-xeon-backend-server" \ + "type=text" \ + "messages=" \ + "files=@$ROOT_FOLDER/data/long.txt" \ + "max_tokens=128" \ + "summary_type=refine" +} + +function stop_docker() { + cd $WORKPATH/docker_compose/intel/cpu/xeon/ + docker compose -f compose_tgi.yaml stop && docker compose rm -f +} + +function main() { + echo "===========================================" + echo ">>>> Stopping any running Docker containers..." + stop_docker + + echo "===========================================" + if [[ "$IMAGE_REPO" == "opea" ]]; then + echo ">>>> Building Docker images..." + build_docker_images + fi + + echo "===========================================" + echo ">>>> Starting Docker services..." + start_services + + echo "===========================================" + echo ">>>> Validating microservices..." + validate_microservices + + echo "===========================================" + echo ">>>> Validating megaservice for text..." + validate_megaservice_text + + echo "===========================================" + echo ">>>> Validating megaservice for multimedia..." + validate_megaservice_multimedia + + echo "===========================================" + echo ">>>> Validating megaservice for long text..." + validate_megaservice_long_text + + echo "===========================================" + echo ">>>> Stopping Docker containers..." + stop_docker + + echo "===========================================" + echo ">>>> Pruning Docker system..." + echo y | docker system prune + echo ">>>> Docker system pruned successfully." + echo "===========================================" +} + +main From 46d795db7d009c0ad78868d34dd791c01a389b89 Mon Sep 17 00:00:00 2001 From: letonghan Date: Mon, 24 Mar 2025 16:42:51 +0800 Subject: [PATCH 02/12] support vllm on xeon Signed-off-by: letonghan --- .../intel/cpu/xeon/compose.yaml | 37 ++-- DocSum/docker_image_build/build.yaml | 12 ++ DocSum/tests/test_compose_on_xeon.sh | 204 +++++++++--------- DocSum/tests/test_compose_tgi_on_xeon.sh | 137 ++++++------ 4 files changed, 202 insertions(+), 188 deletions(-) diff --git a/DocSum/docker_compose/intel/cpu/xeon/compose.yaml b/DocSum/docker_compose/intel/cpu/xeon/compose.yaml index 8d91db5e73..af7a3dcc8b 100644 --- a/DocSum/docker_compose/intel/cpu/xeon/compose.yaml +++ b/DocSum/docker_compose/intel/cpu/xeon/compose.yaml @@ -2,54 +2,53 @@ # SPDX-License-Identifier: Apache-2.0 services: - tgi-server: - image: ghcr.io/huggingface/text-generation-inference:2.4.0-intel-cpu - container_name: tgi-server + vllm-service: + image: ${REGISTRY:-opea}/vllm:${TAG:-latest} + container_name: docsum-xeon-vllm-service ports: - - ${LLM_ENDPOINT_PORT:-8008}:80 + - "8008:80" + volumes: + - "${MODEL_CACHE:-./data}:/root/.cache/huggingface/hub" + shm_size: 1g environment: no_proxy: ${no_proxy} http_proxy: ${http_proxy} https_proxy: ${https_proxy} - TGI_LLM_ENDPOINT: ${TGI_LLM_ENDPOINT} - HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN} - host_ip: ${host_ip} - LLM_ENDPOINT_PORT: ${LLM_ENDPOINT_PORT} + HF_TOKEN: ${HUGGINGFACEHUB_API_TOKEN} + LLM_MODEL_ID: ${LLM_MODEL_ID} + VLLM_TORCH_PROFILER_DIR: "/mnt" healthcheck: - test: ["CMD-SHELL", "curl -f http://${host_ip}:${LLM_ENDPOINT_PORT}/health || exit 1"] + test: ["CMD-SHELL", "curl -f http://$host_ip:8008/health || exit 1"] interval: 10s timeout: 10s retries: 100 - volumes: - - 
"${MODEL_CACHE:-./data}:/data" - shm_size: 1g - command: --model-id ${LLM_MODEL_ID} --cuda-graphs 0 --max-input-length ${MAX_INPUT_TOKENS} --max-total-tokens ${MAX_TOTAL_TOKENS} + command: --model $LLM_MODEL_ID --host 0.0.0.0 --port 80 llm-docsum-tgi: image: ${REGISTRY:-opea}/llm-docsum:${TAG:-latest} - container_name: llm-docsum-server + container_name: docsum-xeon-llm-server depends_on: tgi-server: condition: service_healthy ports: - - ${DOCSUM_PORT:-9000}:9000 + - ${LLM_PORT:-9000}:9000 ipc: host environment: no_proxy: ${no_proxy} http_proxy: ${http_proxy} https_proxy: ${https_proxy} LLM_ENDPOINT: ${LLM_ENDPOINT} + LLM_MODEL_ID: ${LLM_MODEL_ID} HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN} MAX_INPUT_TOKENS: ${MAX_INPUT_TOKENS} MAX_TOTAL_TOKENS: ${MAX_TOTAL_TOKENS} - LLM_MODEL_ID: ${LLM_MODEL_ID} DocSum_COMPONENT_NAME: ${DocSum_COMPONENT_NAME} LOGFLAG: ${LOGFLAG:-False} restart: unless-stopped whisper: image: ${REGISTRY:-opea}/whisper:${TAG:-latest} - container_name: whisper-server + container_name: docsum-xeon-whisper-server ports: - "7066:7066" ipc: host @@ -66,7 +65,7 @@ services: - tgi-server - llm-docsum-tgi ports: - - "8888:8888" + - "${BACKEND_SERVICE_PORT:-8888}:8888" environment: - no_proxy=${no_proxy} - https_proxy=${https_proxy} @@ -83,7 +82,7 @@ services: depends_on: - docsum-xeon-backend-server ports: - - "5173:5173" + - "${FRONTEND_SERVICE_PORT:-5173}:5173" environment: - no_proxy=${no_proxy} - https_proxy=${https_proxy} diff --git a/DocSum/docker_image_build/build.yaml b/DocSum/docker_image_build/build.yaml index 7eabbfc6aa..6d09a07aa9 100644 --- a/DocSum/docker_image_build/build.yaml +++ b/DocSum/docker_image_build/build.yaml @@ -49,3 +49,15 @@ services: dockerfile: comps/llms/src/doc-summarization/Dockerfile extends: docsum image: ${REGISTRY:-opea}/llm-docsum:${TAG:-latest} + vllm: + build: + context: vllm + dockerfile: Dockerfile.cpu + extends: docsum + image: ${REGISTRY:-opea}/vllm:${TAG:-latest} + vllm-gaudi: + build: + context: vllm-fork + dockerfile: Dockerfile.hpu + extends: docsum + image: ${REGISTRY:-opea}/vllm-gaudi:${TAG:-latest} diff --git a/DocSum/tests/test_compose_on_xeon.sh b/DocSum/tests/test_compose_on_xeon.sh index c813da75d4..8ef7969a14 100644 --- a/DocSum/tests/test_compose_on_xeon.sh +++ b/DocSum/tests/test_compose_on_xeon.sh @@ -12,22 +12,24 @@ export host_ip=$(hostname -I | awk '{print $1}') echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}" echo "TAG=IMAGE_TAG=${IMAGE_TAG}" +export no_proxy="${no_proxy},${host_ip}" export MODEL_CACHE=${model_cache:-"./data"} export REGISTRY=${IMAGE_REPO} export TAG=${IMAGE_TAG} +export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN} +export LLM_ENDPOINT_PORT=8008 +export LLM_MODEL_ID="Intel/neural-chat-7b-v3-3" export MAX_INPUT_TOKENS=2048 export MAX_TOTAL_TOKENS=4096 -export LLM_MODEL_ID="Intel/neural-chat-7b-v3-3" -export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN} +export DOCSUM_PORT=9000 +export LLM_ENDPOINT="http://${host_ip}:${LLM_ENDPOINT_PORT}" +export DocSum_COMPONENT_NAME="OpeaDocSumvLLM" export MEGA_SERVICE_HOST_IP=${host_ip} export LLM_SERVICE_HOST_IP=${host_ip} export ASR_SERVICE_HOST_IP=${host_ip} -export BACKEND_SERVICE_ENDPOINT="http://${host_ip}:8888/v1/docsum" -export no_proxy="${no_proxy},${host_ip}" -export LLM_ENDPOINT_PORT=8008 -export DOCSUM_PORT=9000 -export LLM_ENDPOINT="http://${host_ip}:${LLM_ENDPOINT_PORT}" -export DocSum_COMPONENT_NAME="OpeaDocSumTgi" +export FRONTEND_SERVICE_PORT=5173 +export BACKEND_SERVICE_PORT=8888 +export 
BACKEND_SERVICE_ENDPOINT="http://${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" export LOGFLAG=True WORKPATH=$(dirname "$PWD") @@ -38,17 +40,33 @@ ROOT_FOLDER=$(dirname "$(readlink -f "$0")") function build_docker_images() { opea_branch=${opea_branch:-"main"} + # If the opea_branch isn't main, replace the git clone branch in Dockerfile. + if [[ "${opea_branch}" != "main" ]]; then + cd $WORKPATH + OLD_STRING="RUN git clone --depth 1 https://github.com/opea-project/GenAIComps.git" + NEW_STRING="RUN git clone --depth 1 --branch ${opea_branch} https://github.com/opea-project/GenAIComps.git" + find . -type f -name "Dockerfile*" | while read -r file; do + echo "Processing file: $file" + sed -i "s|$OLD_STRING|$NEW_STRING|g" "$file" + done + fi + cd $WORKPATH/docker_image_build git clone --depth 1 --branch ${opea_branch} https://github.com/opea-project/GenAIComps.git pushd GenAIComps docker build --no-cache -t ${REGISTRY}/comps-base:${TAG} --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile . popd && sleep 1s + git clone https://github.com/vllm-project/vllm.git && cd vllm + VLLM_VER="$(git describe --tags "$(git rev-list --tags --max-count=1)" )" + echo "Check out vLLM tag ${VLLM_VER}" + git checkout ${VLLM_VER} &> /dev/null + cd ../ + echo "Build all the images with --no-cache, check docker_image_build.log for details..." - service_list="docsum docsum-gradio-ui whisper llm-docsum" + service_list="docsum docsum-gradio-ui whisper llm-docsum vllm" docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log - docker pull ghcr.io/huggingface/text-generation-inference:1.4 docker images && sleep 1s } @@ -83,70 +101,46 @@ input_data_for_test() { esac } -function validate_services_json() { +function validate_service() { local URL="$1" local EXPECTED_RESULT="$2" local SERVICE_NAME="$3" local DOCKER_NAME="$4" - local INPUT_DATA="$5" - - local HTTP_STATUS=$(curl -s -o /dev/null -w "%{http_code}" -X POST -d "$INPUT_DATA" -H 'Content-Type: application/json' "$URL") - - echo "===========================================" - - if [ "$HTTP_STATUS" -eq 200 ]; then - echo "[ $SERVICE_NAME ] HTTP status is 200. Checking content..." - - local CONTENT=$(curl -s -X POST -d "$INPUT_DATA" -H 'Content-Type: application/json' "$URL" | tee ${LOG_PATH}/${SERVICE_NAME}.log) - - if echo "$CONTENT" | grep -q "$EXPECTED_RESULT"; then - echo "[ $SERVICE_NAME ] Content is as expected." - else - echo "EXPECTED_RESULT==> $EXPECTED_RESULT" - echo "CONTENT==> $CONTENT" - echo "[ $SERVICE_NAME ] Content does not match the expected result: $CONTENT" - docker logs ${DOCKER_NAME} >> ${LOG_PATH}/${SERVICE_NAME}.log - exit 1 - - fi + local VALIDATE_TYPE="$5" + local INPUT_DATA="$6" + local FORM_DATA1="$7" + local FORM_DATA2="$8" + local FORM_DATA3="$9" + local FORM_DATA4="$10" + local FORM_DATA5="$11" + + if [[ $VALIDATE_TYPE == *"json"* ]]; then + HTTP_RESPONSE=$(curl --silent --write-out "HTTPSTATUS:%{http_code}" -X POST -d "$INPUT_DATA" -H 'Content-Type: application/json' "$URL") else - echo "[ $SERVICE_NAME ] HTTP status is not 200. 
Received status was $HTTP_STATUS" - docker logs ${DOCKER_NAME} >> ${LOG_PATH}/${SERVICE_NAME}.log - exit 1 + HTTP_RESPONSE=$(curl --silent --write-out "HTTPSTATUS:%{http_code}" -X POST -F "$FORM_DATA1" -F "$FORM_DATA2" -F "$FORM_DATA3" -F "$FORM_DATA4" -F "$FORM_DATA5" -H 'Content-Type: multipart/form-data' "$URL") fi - sleep 1s -} - -function validate_services_form() { - local URL="$1" - local EXPECTED_RESULT="$2" - local SERVICE_NAME="$3" - local DOCKER_NAME="$4" - local FORM_DATA1="$5" - local FORM_DATA2="$6" - local FORM_DATA3="$7" - local FORM_DATA4="$8" - local FORM_DATA5="$9" - - local HTTP_STATUS=$(curl -s -o /dev/null -w "%{http_code}" -X POST -F "$FORM_DATA1" -F "$FORM_DATA2" -F "$FORM_DATA3" -F "$FORM_DATA4" -F "$FORM_DATA5" -H 'Content-Type: multipart/form-data' "$URL") + HTTP_STATUS=$(echo $HTTP_RESPONSE | tr -d '\n' | sed -e 's/.*HTTPSTATUS://') + RESPONSE_BODY=$(echo $HTTP_RESPONSE | sed -e 's/HTTPSTATUS\:.*//g') - if [ "$HTTP_STATUS" -eq 200 ]; then - echo "[ $SERVICE_NAME ] HTTP status is 200. Checking content..." - - local CONTENT=$(curl -s -X POST -F "$FORM_DATA1" -F "$FORM_DATA2" -F "$FORM_DATA3" -F "$FORM_DATA4" -F "$FORM_DATA5" -H 'Content-Type: multipart/form-data' "$URL" | tee ${LOG_PATH}/${SERVICE_NAME}.log) + docker logs ${DOCKER_NAME} >> ${LOG_PATH}/${SERVICE_NAME}.log - if echo "$CONTENT" | grep -q "$EXPECTED_RESULT"; then - echo "[ $SERVICE_NAME ] Content is as expected." - else - echo "[ $SERVICE_NAME ] Content does not match the expected result: $CONTENT" - docker logs ${DOCKER_NAME} >> ${LOG_PATH}/${SERVICE_NAME}.log - exit 1 - fi - else + # check response status + if [ "$HTTP_STATUS" -ne "200" ]; then echo "[ $SERVICE_NAME ] HTTP status is not 200. Received status was $HTTP_STATUS" - docker logs ${DOCKER_NAME} >> ${LOG_PATH}/${SERVICE_NAME}.log exit 1 + else + echo "[ $SERVICE_NAME ] HTTP status is 200. Checking content..." fi + # check response body + if [[ "$RESPONSE_BODY" != *"$EXPECTED_RESULT"* ]]; then + echo "EXPECTED_RESULT==> $EXPECTED_RESULT" + echo "RESPONSE_BODY==> $RESPONSE_BODY" + echo "[ $SERVICE_NAME ] Content does not match the expected result: $RESPONSE_BODY" + exit 1 + else + echo "[ $SERVICE_NAME ] Content is as expected." + fi + sleep 1s } @@ -154,47 +148,52 @@ function validate_microservices() { # Check if the microservices are running correctly. # tgi for llm service - validate_services_json \ - "${host_ip}:8008/generate" \ + validate_service \ + "${host_ip}:${LLM_ENDPOINT_PORT}/generate" \ "generated_text" \ "tgi-server" \ - "tgi-server" \ + "docsum-xeon-tgi-server" \ + "json" \ '{"inputs":"What is Deep Learning?","parameters":{"max_new_tokens":17, "do_sample": true}}' # llm microservice - validate_services_json \ - "${host_ip}:9000/v1/docsum" \ + validate_service \ + "${host_ip}:${LLM_PORT}/v1/docsum" \ "text" \ "llm-docsum-tgi" \ - "llm-docsum-server" \ + "docsum-xeon-llm-server" \ + "json" \ '{"messages":"Text Embeddings Inference (TEI) is a toolkit for deploying and serving open source text embeddings and sequence classification models. 
TEI enables high-performance extraction for the most popular models, including FlagEmbedding, Ember, GTE and E5."}' # whisper microservice ulimit -s 65536 - validate_services_json \ + validate_service \ "${host_ip}:7066/v1/asr" \ '{"asr_result":"well"}' \ "whisper" \ - "whisper-server" \ + "docsum-xeon-whisper-server" \ + "json" \ "{\"audio\": \"$(input_data_for_test "audio")\"}" } function validate_megaservice_text() { echo ">>> Checking text data in json format" - validate_services_json \ - "${host_ip}:8888/v1/docsum" \ + validate_service \ + "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \ "[DONE]" \ "docsum-xeon-backend-server" \ "docsum-xeon-backend-server" \ + "json" \ '{"type": "text", "messages": "Text Embeddings Inference (TEI) is a toolkit for deploying and serving open source text embeddings and sequence classification models. TEI enables high-performance extraction for the most popular models, including FlagEmbedding, Ember, GTE and E5."}' echo ">>> Checking text data in form format, set language=en" - validate_services_form \ - "${host_ip}:8888/v1/docsum" \ + validate_service \ + "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \ "[DONE]" \ "docsum-xeon-backend-server" \ "docsum-xeon-backend-server" \ + "media" "" \ "type=text" \ "messages=Text Embeddings Inference (TEI) is a toolkit for deploying and serving open source text embeddings and sequence classification models. TEI enables high-performance extraction for the most popular models, including FlagEmbedding, Ember, GTE and E5." \ "max_tokens=32" \ @@ -202,11 +201,12 @@ function validate_megaservice_text() { "stream=True" echo ">>> Checking text data in form format, set language=zh" - validate_services_form \ - "${host_ip}:8888/v1/docsum" \ + validate_service \ + "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \ "[DONE]" \ "docsum-xeon-backend-server" \ "docsum-xeon-backend-server" \ + "media" "" \ "type=text" \ "messages=2024年9月26日,北京——今日,英特尔正式发布英特尔® 至强® 6性能核处理器(代号Granite Rapids),为AI、数据分析、科学计算等计算密集型业务提供卓越性能。" \ "max_tokens=32" \ @@ -214,11 +214,12 @@ function validate_megaservice_text() { "stream=True" echo ">>> Checking text data in form format, upload file" - validate_services_form \ - "${host_ip}:8888/v1/docsum" \ + validate_service \ + "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \ "[DONE]" \ "docsum-xeon-backend-server" \ "docsum-xeon-backend-server" \ + "media" "" \ "type=text" \ "messages=" \ "files=@$ROOT_FOLDER/data/short.txt" \ @@ -228,19 +229,21 @@ function validate_megaservice_text() { function validate_megaservice_multimedia() { echo ">>> Checking audio data in json format" - validate_services_json \ - "${host_ip}:8888/v1/docsum" \ + validate_service \ + "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \ "[DONE]" \ "docsum-xeon-backend-server" \ "docsum-xeon-backend-server" \ + "json" \ "{\"type\": \"audio\", \"messages\": \"$(input_data_for_test "audio")\"}" echo ">>> Checking audio data in form format" - validate_services_form \ - "${host_ip}:8888/v1/docsum" \ + validate_service \ + "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \ "[DONE]" \ "docsum-xeon-backend-server" \ "docsum-xeon-backend-server" \ + "media" "" \ "type=audio" \ "messages=UklGRigAAABXQVZFZm10IBIAAAABAAEARKwAAIhYAQACABAAAABkYXRhAgAAAAEA" \ "max_tokens=32" \ @@ -248,19 +251,21 @@ function validate_megaservice_multimedia() { "stream=True" echo ">>> Checking video data in json format" - validate_services_json \ - "${host_ip}:8888/v1/docsum" \ + validate_service \ + "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \ "[DONE]" \ 
"docsum-xeon-backend-server" \ "docsum-xeon-backend-server" \ + "json" \ "{\"type\": \"video\", \"messages\": \"$(input_data_for_test "video")\"}" echo ">>> Checking video data in form format" - validate_services_form \ - "${host_ip}:8888/v1/docsum" \ + validate_service \ + "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \ "[DONE]" \ "docsum-xeon-backend-server" \ "docsum-xeon-backend-server" \ + "media" "" \ "type=video" \ "messages=\"$(input_data_for_test "video")\"" \ "max_tokens=32" \ @@ -270,11 +275,12 @@ function validate_megaservice_multimedia() { function validate_megaservice_long_text() { echo ">>> Checking long text data in form format, set summary_type=auto" - validate_services_form \ - "${host_ip}:8888/v1/docsum" \ + validate_service \ + "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \ "[DONE]" \ "docsum-xeon-backend-server" \ "docsum-xeon-backend-server" \ + "media" "" \ "type=text" \ "messages=" \ "files=@$ROOT_FOLDER/data/long.txt" \ @@ -282,11 +288,12 @@ function validate_megaservice_long_text() { "summary_type=auto" echo ">>> Checking long text data in form format, set summary_type=stuff" - validate_services_form \ - "${host_ip}:8888/v1/docsum" \ + validate_service \ + "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \ "[DONE]" \ "docsum-xeon-backend-server" \ "docsum-xeon-backend-server" \ + "media" "" \ "type=text" \ "messages=" \ "files=@$ROOT_FOLDER/data/long.txt" \ @@ -294,11 +301,12 @@ function validate_megaservice_long_text() { "summary_type=stuff" echo ">>> Checking long text data in form format, set summary_type=truncate" - validate_services_form \ - "${host_ip}:8888/v1/docsum" \ + validate_service \ + "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \ "[DONE]" \ "docsum-xeon-backend-server" \ "docsum-xeon-backend-server" \ + "media" "" \ "type=text" \ "messages=" \ "files=@$ROOT_FOLDER/data/long.txt" \ @@ -306,11 +314,12 @@ function validate_megaservice_long_text() { "summary_type=truncate" echo ">>> Checking long text data in form format, set summary_type=map_reduce" - validate_services_form \ - "${host_ip}:8888/v1/docsum" \ + validate_service \ + "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \ "[DONE]" \ "docsum-xeon-backend-server" \ "docsum-xeon-backend-server" \ + "media" "" \ "type=text" \ "messages=" \ "files=@$ROOT_FOLDER/data/long.txt" \ @@ -318,11 +327,12 @@ function validate_megaservice_long_text() { "summary_type=map_reduce" echo ">>> Checking long text data in form format, set summary_type=refine" - validate_services_form \ - "${host_ip}:8888/v1/docsum" \ + validate_service \ + "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \ "[DONE]" \ "docsum-xeon-backend-server" \ "docsum-xeon-backend-server" \ + "media" "" \ "type=text" \ "messages=" \ "files=@$ROOT_FOLDER/data/long.txt" \ diff --git a/DocSum/tests/test_compose_tgi_on_xeon.sh b/DocSum/tests/test_compose_tgi_on_xeon.sh index 90e63a1f0d..cc42644a45 100644 --- a/DocSum/tests/test_compose_tgi_on_xeon.sh +++ b/DocSum/tests/test_compose_tgi_on_xeon.sh @@ -27,6 +27,7 @@ export DocSum_COMPONENT_NAME="OpeaDocSumTgi" export MEGA_SERVICE_HOST_IP=${host_ip} export LLM_SERVICE_HOST_IP=${host_ip} export ASR_SERVICE_HOST_IP=${host_ip} +export FRONTEND_SERVICE_PORT=5173 export BACKEND_SERVICE_PORT=8888 export BACKEND_SERVICE_ENDPOINT="http://${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" export LOGFLAG=True @@ -84,70 +85,46 @@ input_data_for_test() { esac } -function validate_services_json() { +function validate_service() { local URL="$1" local EXPECTED_RESULT="$2" local SERVICE_NAME="$3" local DOCKER_NAME="$4" - 
local INPUT_DATA="$5" - - local HTTP_STATUS=$(curl -s -o /dev/null -w "%{http_code}" -X POST -d "$INPUT_DATA" -H 'Content-Type: application/json' "$URL") - - echo "===========================================" - - if [ "$HTTP_STATUS" -eq 200 ]; then - echo "[ $SERVICE_NAME ] HTTP status is 200. Checking content..." - - local CONTENT=$(curl -s -X POST -d "$INPUT_DATA" -H 'Content-Type: application/json' "$URL" | tee ${LOG_PATH}/${SERVICE_NAME}.log) - - if echo "$CONTENT" | grep -q "$EXPECTED_RESULT"; then - echo "[ $SERVICE_NAME ] Content is as expected." - else - echo "EXPECTED_RESULT==> $EXPECTED_RESULT" - echo "CONTENT==> $CONTENT" - echo "[ $SERVICE_NAME ] Content does not match the expected result: $CONTENT" - docker logs ${DOCKER_NAME} >> ${LOG_PATH}/${SERVICE_NAME}.log - exit 1 - - fi + local VALIDATE_TYPE="$5" + local INPUT_DATA="$6" + local FORM_DATA1="$7" + local FORM_DATA2="$8" + local FORM_DATA3="$9" + local FORM_DATA4="$10" + local FORM_DATA5="$11" + + if [[ $VALIDATE_TYPE == *"json"* ]]; then + HTTP_RESPONSE=$(curl --silent --write-out "HTTPSTATUS:%{http_code}" -X POST -d "$INPUT_DATA" -H 'Content-Type: application/json' "$URL") else - echo "[ $SERVICE_NAME ] HTTP status is not 200. Received status was $HTTP_STATUS" - docker logs ${DOCKER_NAME} >> ${LOG_PATH}/${SERVICE_NAME}.log - exit 1 + HTTP_RESPONSE=$(curl --silent --write-out "HTTPSTATUS:%{http_code}" -X POST -F "$FORM_DATA1" -F "$FORM_DATA2" -F "$FORM_DATA3" -F "$FORM_DATA4" -F "$FORM_DATA5" -H 'Content-Type: multipart/form-data' "$URL") fi - sleep 1s -} - -function validate_services_form() { - local URL="$1" - local EXPECTED_RESULT="$2" - local SERVICE_NAME="$3" - local DOCKER_NAME="$4" - local FORM_DATA1="$5" - local FORM_DATA2="$6" - local FORM_DATA3="$7" - local FORM_DATA4="$8" - local FORM_DATA5="$9" - - local HTTP_STATUS=$(curl -s -o /dev/null -w "%{http_code}" -X POST -F "$FORM_DATA1" -F "$FORM_DATA2" -F "$FORM_DATA3" -F "$FORM_DATA4" -F "$FORM_DATA5" -H 'Content-Type: multipart/form-data' "$URL") + HTTP_STATUS=$(echo $HTTP_RESPONSE | tr -d '\n' | sed -e 's/.*HTTPSTATUS://') + RESPONSE_BODY=$(echo $HTTP_RESPONSE | sed -e 's/HTTPSTATUS\:.*//g') - if [ "$HTTP_STATUS" -eq 200 ]; then - echo "[ $SERVICE_NAME ] HTTP status is 200. Checking content..." - - local CONTENT=$(curl -s -X POST -F "$FORM_DATA1" -F "$FORM_DATA2" -F "$FORM_DATA3" -F "$FORM_DATA4" -F "$FORM_DATA5" -H 'Content-Type: multipart/form-data' "$URL" | tee ${LOG_PATH}/${SERVICE_NAME}.log) + docker logs ${DOCKER_NAME} >> ${LOG_PATH}/${SERVICE_NAME}.log - if echo "$CONTENT" | grep -q "$EXPECTED_RESULT"; then - echo "[ $SERVICE_NAME ] Content is as expected." - else - echo "[ $SERVICE_NAME ] Content does not match the expected result: $CONTENT" - docker logs ${DOCKER_NAME} >> ${LOG_PATH}/${SERVICE_NAME}.log - exit 1 - fi - else + # check response status + if [ "$HTTP_STATUS" -ne "200" ]; then echo "[ $SERVICE_NAME ] HTTP status is not 200. Received status was $HTTP_STATUS" - docker logs ${DOCKER_NAME} >> ${LOG_PATH}/${SERVICE_NAME}.log exit 1 + else + echo "[ $SERVICE_NAME ] HTTP status is 200. Checking content..." fi + # check response body + if [[ "$RESPONSE_BODY" != *"$EXPECTED_RESULT"* ]]; then + echo "EXPECTED_RESULT==> $EXPECTED_RESULT" + echo "RESPONSE_BODY==> $RESPONSE_BODY" + echo "[ $SERVICE_NAME ] Content does not match the expected result: $RESPONSE_BODY" + exit 1 + else + echo "[ $SERVICE_NAME ] Content is as expected." + fi + sleep 1s } @@ -155,47 +132,52 @@ function validate_microservices() { # Check if the microservices are running correctly. 
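    # validate_service(URL, EXPECTED_RESULT, SERVICE_NAME, DOCKER_NAME, VALIDATE_TYPE, INPUT_DATA, FORM_DATA1..FORM_DATA5)
    # posts JSON when VALIDATE_TYPE contains "json", and multipart form data otherwise (see the definition above).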
# tgi for llm service - validate_services_json \ + validate_service \ "${host_ip}:${LLM_ENDPOINT_PORT}/generate" \ "generated_text" \ "tgi-server" \ - "tgi-server" \ + "docsum-xeon-tgi-server" \ + "json" \ '{"inputs":"What is Deep Learning?","parameters":{"max_new_tokens":17, "do_sample": true}}' # llm microservice - validate_services_json \ + validate_service \ "${host_ip}:${LLM_PORT}/v1/docsum" \ "text" \ "llm-docsum-tgi" \ - "llm-docsum-server" \ + "docsum-xeon-llm-server" \ + "json" \ '{"messages":"Text Embeddings Inference (TEI) is a toolkit for deploying and serving open source text embeddings and sequence classification models. TEI enables high-performance extraction for the most popular models, including FlagEmbedding, Ember, GTE and E5."}' # whisper microservice ulimit -s 65536 - validate_services_json \ + validate_service \ "${host_ip}:7066/v1/asr" \ '{"asr_result":"well"}' \ "whisper" \ - "whisper-server" \ + "docsum-xeon-whisper-server" \ + "json" \ "{\"audio\": \"$(input_data_for_test "audio")\"}" } function validate_megaservice_text() { echo ">>> Checking text data in json format" - validate_services_json \ + validate_service \ "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \ "[DONE]" \ "docsum-xeon-backend-server" \ "docsum-xeon-backend-server" \ + "json" \ '{"type": "text", "messages": "Text Embeddings Inference (TEI) is a toolkit for deploying and serving open source text embeddings and sequence classification models. TEI enables high-performance extraction for the most popular models, including FlagEmbedding, Ember, GTE and E5."}' echo ">>> Checking text data in form format, set language=en" - validate_services_form \ + validate_service \ "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \ "[DONE]" \ "docsum-xeon-backend-server" \ "docsum-xeon-backend-server" \ + "media" "" \ "type=text" \ "messages=Text Embeddings Inference (TEI) is a toolkit for deploying and serving open source text embeddings and sequence classification models. TEI enables high-performance extraction for the most popular models, including FlagEmbedding, Ember, GTE and E5." 
\ "max_tokens=32" \ @@ -203,11 +185,12 @@ function validate_megaservice_text() { "stream=True" echo ">>> Checking text data in form format, set language=zh" - validate_services_form \ + validate_service \ "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \ "[DONE]" \ "docsum-xeon-backend-server" \ "docsum-xeon-backend-server" \ + "media" "" \ "type=text" \ "messages=2024年9月26日,北京——今日,英特尔正式发布英特尔® 至强® 6性能核处理器(代号Granite Rapids),为AI、数据分析、科学计算等计算密集型业务提供卓越性能。" \ "max_tokens=32" \ @@ -215,11 +198,12 @@ function validate_megaservice_text() { "stream=True" echo ">>> Checking text data in form format, upload file" - validate_services_form \ + validate_service \ "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \ "[DONE]" \ "docsum-xeon-backend-server" \ "docsum-xeon-backend-server" \ + "media" "" \ "type=text" \ "messages=" \ "files=@$ROOT_FOLDER/data/short.txt" \ @@ -229,19 +213,21 @@ function validate_megaservice_text() { function validate_megaservice_multimedia() { echo ">>> Checking audio data in json format" - validate_services_json \ + validate_service \ "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \ "[DONE]" \ "docsum-xeon-backend-server" \ "docsum-xeon-backend-server" \ + "json" \ "{\"type\": \"audio\", \"messages\": \"$(input_data_for_test "audio")\"}" echo ">>> Checking audio data in form format" - validate_services_form \ + validate_service \ "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \ "[DONE]" \ "docsum-xeon-backend-server" \ "docsum-xeon-backend-server" \ + "media" "" \ "type=audio" \ "messages=UklGRigAAABXQVZFZm10IBIAAAABAAEARKwAAIhYAQACABAAAABkYXRhAgAAAAEA" \ "max_tokens=32" \ @@ -249,19 +235,21 @@ function validate_megaservice_multimedia() { "stream=True" echo ">>> Checking video data in json format" - validate_services_json \ + validate_service \ "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \ "[DONE]" \ "docsum-xeon-backend-server" \ "docsum-xeon-backend-server" \ + "json" \ "{\"type\": \"video\", \"messages\": \"$(input_data_for_test "video")\"}" echo ">>> Checking video data in form format" - validate_services_form \ + validate_service \ "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \ "[DONE]" \ "docsum-xeon-backend-server" \ "docsum-xeon-backend-server" \ + "media" "" \ "type=video" \ "messages=\"$(input_data_for_test "video")\"" \ "max_tokens=32" \ @@ -271,11 +259,12 @@ function validate_megaservice_multimedia() { function validate_megaservice_long_text() { echo ">>> Checking long text data in form format, set summary_type=auto" - validate_services_form \ + validate_service \ "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \ "[DONE]" \ "docsum-xeon-backend-server" \ "docsum-xeon-backend-server" \ + "media" "" \ "type=text" \ "messages=" \ "files=@$ROOT_FOLDER/data/long.txt" \ @@ -283,11 +272,12 @@ function validate_megaservice_long_text() { "summary_type=auto" echo ">>> Checking long text data in form format, set summary_type=stuff" - validate_services_form \ + validate_service \ "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \ "[DONE]" \ "docsum-xeon-backend-server" \ "docsum-xeon-backend-server" \ + "media" "" \ "type=text" \ "messages=" \ "files=@$ROOT_FOLDER/data/long.txt" \ @@ -295,11 +285,12 @@ function validate_megaservice_long_text() { "summary_type=stuff" echo ">>> Checking long text data in form format, set summary_type=truncate" - validate_services_form \ + validate_service \ "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \ "[DONE]" \ "docsum-xeon-backend-server" \ "docsum-xeon-backend-server" \ + "media" "" \ "type=text" \ "messages=" \ 
"files=@$ROOT_FOLDER/data/long.txt" \ @@ -307,11 +298,12 @@ function validate_megaservice_long_text() { "summary_type=truncate" echo ">>> Checking long text data in form format, set summary_type=map_reduce" - validate_services_form \ + validate_service \ "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \ "[DONE]" \ "docsum-xeon-backend-server" \ "docsum-xeon-backend-server" \ + "media" "" \ "type=text" \ "messages=" \ "files=@$ROOT_FOLDER/data/long.txt" \ @@ -319,11 +311,12 @@ function validate_megaservice_long_text() { "summary_type=map_reduce" echo ">>> Checking long text data in form format, set summary_type=refine" - validate_services_form \ + validate_service \ "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \ "[DONE]" \ "docsum-xeon-backend-server" \ "docsum-xeon-backend-server" \ + "media" "" \ "type=text" \ "messages=" \ "files=@$ROOT_FOLDER/data/long.txt" \ From 0c44b7ed2998d4e21117146c649fff9b7ebb58ed Mon Sep 17 00:00:00 2001 From: letonghan Date: Tue, 25 Mar 2025 13:44:16 +0800 Subject: [PATCH 03/12] support vllm in DocSum gaudi and refine related files Signed-off-by: letonghan --- .../docker_compose/intel/cpu/xeon/README.md | 34 +- .../intel/cpu/xeon/compose.yaml | 8 +- .../docker_compose/intel/hpu/gaudi/README.md | 36 +- .../intel/hpu/gaudi/compose.yaml | 55 ++- .../intel/hpu/gaudi/compose_tgi.yaml | 114 ++++++ DocSum/tests/test_compose_on_gaudi.sh | 210 +++++----- DocSum/tests/test_compose_on_xeon.sh | 17 +- DocSum/tests/test_compose_tgi_on_gaudi.sh | 375 ++++++++++++++++++ DocSum/tests/test_compose_tgi_on_xeon.sh | 4 +- 9 files changed, 697 insertions(+), 156 deletions(-) create mode 100644 DocSum/docker_compose/intel/hpu/gaudi/compose_tgi.yaml create mode 100644 DocSum/tests/test_compose_tgi_on_gaudi.sh diff --git a/DocSum/docker_compose/intel/cpu/xeon/README.md b/DocSum/docker_compose/intel/cpu/xeon/README.md index 9465c0c976..d367b20e38 100644 --- a/DocSum/docker_compose/intel/cpu/xeon/README.md +++ b/DocSum/docker_compose/intel/cpu/xeon/README.md @@ -2,6 +2,8 @@ This document outlines the deployment process for a Document Summarization application utilizing the [GenAIComps](https://github.com/opea-project/GenAIComps.git) microservice pipeline on an Intel Xeon server. The steps include Docker image creation, container deployment via Docker Compose, and service execution to integrate microservices such as `llm`. We will publish the Docker images to Docker Hub soon, which will simplify the deployment process for this service. +The default pipeline deploys with vLLM as the LLM serving component. It also provides options of using TGI backend for LLM microservice, please refer to [start-microservice-docker-containers](#start-microservice-docker-containers) section in this page. + ## 🚀 Apply Intel Xeon Server on AWS To apply a Intel Xeon server on AWS, start by creating an AWS account if you don't have one already. Then, head to the [EC2 Console](https://console.aws.amazon.com/ec2/v2/home) to begin the process. Within the EC2 service, select the Amazon EC2 M7i or M7i-flex instance type to leverage 4th Generation Intel Xeon Scalable processors. These instances are optimized for high-performance computing and demanding workloads. @@ -116,9 +118,20 @@ To set up environment variables for deploying Document Summarization services, f ```bash cd GenAIExamples/DocSum/docker_compose/intel/cpu/xeon +``` + +If use vLLM as the LLM serving backend. + +```bash docker compose -f compose.yaml up -d ``` +If use TGI as the LLM serving backend. 
+
+```bash
+docker compose -f compose_tgi.yaml up -d
+```
+
 You will have the following Docker Images:

 1. `opea/docsum-ui:latest`
@@ -128,10 +141,27 @@ You will have the following Docker Images:

 ### Validate Microservices

-1. TGI Service
+1. LLM Backend Service
+
+   On its first startup, this service takes extra time to download, load, and warm up the model. Once that finishes, the service is ready.
+   Run the command below to check whether the LLM service is ready.
+
+   ```bash
+   # vLLM service
+   docker logs docsum-xeon-vllm-service 2>&1 | grep complete
+   # If the service is ready, you will see a response like the one below.
+   INFO: Application startup complete.
+   ```
+
+   ```bash
+   # TGI service
+   docker logs docsum-xeon-tgi-server | grep Connected
+   # If the service is ready, you will see a response like the one below.
+   2024-09-03T02:47:53.402023Z INFO text_generation_router::server: router/src/server.rs:2311: Connected
+   ```
+
+   Then try the `cURL` command below to validate services.

    ```bash
-   curl http://${host_ip}:8008/generate \
+   # either vLLM or TGI service
+   curl http://${host_ip}:8008/v1/chat/completions \
     -X POST \
-    -d '{"inputs":"What is Deep Learning?","parameters":{"max_new_tokens":17, "do_sample": true}}' \
+    -d '{"model": "Intel/neural-chat-7b-v3-3", "messages": [{"role": "user", "content": "What is Deep Learning?"}], "max_tokens": 17}' \
     -H 'Content-Type: application/json'

diff --git a/DocSum/docker_compose/intel/cpu/xeon/compose.yaml b/DocSum/docker_compose/intel/cpu/xeon/compose.yaml
index af7a3dcc8b..e6425b2b92 100644
--- a/DocSum/docker_compose/intel/cpu/xeon/compose.yaml
+++ b/DocSum/docker_compose/intel/cpu/xeon/compose.yaml
@@ -24,11 +24,11 @@ services:
       retries: 100
     command: --model $LLM_MODEL_ID --host 0.0.0.0 --port 80

-  llm-docsum-tgi:
+  llm-docsum-vllm:
     image: ${REGISTRY:-opea}/llm-docsum:${TAG:-latest}
     container_name: docsum-xeon-llm-server
     depends_on:
-      tgi-server:
+      vllm-service:
         condition: service_healthy
     ports:
       - ${LLM_PORT:-9000}:9000
@@ -62,8 +62,8 @@ services:
     image: ${REGISTRY:-opea}/docsum:${TAG:-latest}
     container_name: docsum-xeon-backend-server
     depends_on:
-      - tgi-server
-      - llm-docsum-tgi
+      - vllm-service
+      - llm-docsum-vllm
     ports:
       - "${BACKEND_SERVICE_PORT:-8888}:8888"
     environment:
diff --git a/DocSum/docker_compose/intel/hpu/gaudi/README.md b/DocSum/docker_compose/intel/hpu/gaudi/README.md
index d150b3f28e..4348d6b303 100644
--- a/DocSum/docker_compose/intel/hpu/gaudi/README.md
+++ b/DocSum/docker_compose/intel/hpu/gaudi/README.md
@@ -2,6 +2,8 @@

 This document outlines the deployment process for a Document Summarization application utilizing the [GenAIComps](https://github.com/opea-project/GenAIComps.git) microservice pipeline on an Intel Gaudi server. The steps include Docker image creation, container deployment via Docker Compose, and service execution to integrate microservices such as `llm`. We will publish the Docker images to Docker Hub soon, which will simplify the deployment process for this service.

+The default pipeline deploys with vLLM as the LLM serving component. It also provides the option of using a TGI backend for the LLM microservice; refer to the [start-microservice-docker-containers](#start-microservice-docker-containers) section of this page.
+
 ## 🚀 Build Docker Images

 ### 1. Build MicroService Docker Image

@@ -107,10 +109,21 @@ To set up environment variables for deploying Document Summarization services, f

 ### Start Microservice Docker Containers

 ```bash
 cd GenAIExamples/DocSum/docker_compose/intel/hpu/gaudi
+```
+
+If using vLLM as the LLM serving backend:
+
+```bash
 docker compose -f compose.yaml up -d
 ```

+If using TGI as the LLM serving backend:
+
+```bash
+docker compose -f compose_tgi.yaml up -d
+```
+
 You will have the following Docker Images:

 1. `opea/docsum-ui:latest`
@@ -120,10 +133,27 @@ You will have the following Docker Images:

 ### Validate Microservices

-1. TGI Service
+1. LLM Backend Service
+
+   On its first startup, this service takes extra time to download, load, and warm up the model. Once that finishes, the service is ready.
+   Run the command below to check whether the LLM service is ready.
+
+   ```bash
+   # vLLM service
+   docker logs docsum-gaudi-vllm-service 2>&1 | grep complete
+   # If the service is ready, you will see a response like the one below.
+   INFO: Application startup complete.
+   ```
+
+   ```bash
+   # TGI service
+   docker logs docsum-gaudi-tgi-server | grep Connected
+   # If the service is ready, you will see a response like the one below.
+   2024-09-03T02:47:53.402023Z INFO text_generation_router::server: router/src/server.rs:2311: Connected
+   ```
+
+   Then try the `cURL` command below to validate services.

    ```bash
-   curl http://${host_ip}:8008/generate \
+   # either vLLM or TGI service
+   curl http://${host_ip}:8008/v1/chat/completions \
     -X POST \
-    -d '{"inputs":"What is Deep Learning?","parameters":{"max_new_tokens":17, "do_sample": true}}' \
+    -d '{"model": "Intel/neural-chat-7b-v3-3", "messages": [{"role": "user", "content": "What is Deep Learning?"}], "max_tokens": 17}' \
     -H 'Content-Type: application/json'

diff --git a/DocSum/docker_compose/intel/hpu/gaudi/compose.yaml b/DocSum/docker_compose/intel/hpu/gaudi/compose.yaml
index 9ae96e3d7d..6265206e9c 100644
--- a/DocSum/docker_compose/intel/hpu/gaudi/compose.yaml
+++ b/DocSum/docker_compose/intel/hpu/gaudi/compose.yaml
@@ -2,47 +2,42 @@
 # SPDX-License-Identifier: Apache-2.0

 services:
-  tgi-gaudi-server:
-    image: ghcr.io/huggingface/tgi-gaudi:2.3.1
-    container_name: tgi-gaudi-server
+  vllm-service:
+    image: ${REGISTRY:-opea}/vllm-gaudi:${TAG:-latest}
+    container_name: docsum-gaudi-vllm-service
     ports:
-      - ${LLM_ENDPOINT_PORT:-8008}:80
+      - "8008:80"
     volumes:
-      - "${DATA_PATH:-./data}:/data"
+      - "${MODEL_CACHE:-./data}:/root/.cache/huggingface/hub"
     environment:
       no_proxy: ${no_proxy}
       http_proxy: ${http_proxy}
       https_proxy: ${https_proxy}
-      HUGGING_FACE_HUB_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
-      HF_HUB_DISABLE_PROGRESS_BARS: 1
-      HF_HUB_ENABLE_HF_TRANSFER: 0
+      HF_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
       HABANA_VISIBLE_DEVICES: all
       OMPI_MCA_btl_vader_single_copy_mechanism: none
-      ENABLE_HPU_GRAPH: true
-      LIMIT_HPU_GRAPH: true
-      USE_FLASH_ATTENTION: true
-      FLASH_ATTENTION_RECOMPUTE: true
-      host_ip: ${host_ip}
-      LLM_ENDPOINT_PORT: ${LLM_ENDPOINT_PORT}
-    runtime: habana
-    cap_add:
-      - SYS_NICE
-    ipc: host
+      LLM_MODEL_ID: ${LLM_MODEL_ID}
+      NUM_CARDS: ${NUM_CARDS}
+      VLLM_TORCH_PROFILER_DIR: "/mnt"
     healthcheck:
-      test: ["CMD-SHELL", "curl -f http://${host_ip}:${LLM_ENDPOINT_PORT}/health || exit 1"]
+      test: ["CMD-SHELL", "curl -f http://$host_ip:8008/health || exit 1"]
       interval: 10s
       timeout: 10s
       retries: 100
-    command: --model-id ${LLM_MODEL_ID} --max-input-length ${MAX_INPUT_TOKENS} --max-total-tokens ${MAX_TOTAL_TOKENS}
-
-  llm-docsum-tgi:
+    runtime: habana
+    cap_add:
+      - SYS_NICE
+    ipc: host
+    command: --model $LLM_MODEL_ID --tensor-parallel-size ${NUM_CARDS} --host 0.0.0.0 --port 80 --block-size ${BLOCK_SIZE} --max-num-seqs ${MAX_NUM_SEQS} --max-seq_len-to-capture ${MAX_SEQ_LEN_TO_CAPTURE}
+
+  llm-docsum-vllm:
     image: ${REGISTRY:-opea}/llm-docsum:${TAG:-latest}
-    container_name: llm-docsum-gaudi-server
+    container_name: docsum-gaudi-llm-server
     depends_on:
-      tgi-gaudi-server:
+      vllm-service:
         condition: service_healthy
     ports:
-      - ${DOCSUM_PORT:-9000}:9000
+      - ${LLM_PORT:-9000}:9000
     ipc: host
     environment:
@@ -59,7 +54,7 @@ services:

   whisper:
     image: ${REGISTRY:-opea}/whisper:${TAG:-latest}
-    container_name: whisper-server
+    container_name: docsum-gaudi-whisper-server
     ports:
       - "7066:7066"
     ipc: host
@@ -78,10 +73,10 @@ services:
     image: ${REGISTRY:-opea}/docsum:${TAG:-latest}
     container_name: docsum-gaudi-backend-server
     depends_on:
-      - tgi-gaudi-server
-      - llm-docsum-tgi
+      - vllm-service
+      - llm-docsum-vllm
     ports:
-      - "8888:8888"
+      - "${BACKEND_SERVICE_PORT:-8888}:8888"
     environment:
@@ -99,7 +94,7 @@ services:
     depends_on:
       - docsum-gaudi-backend-server
     ports:
-      - "5173:5173"
+      - "${FRONTEND_SERVICE_PORT:-5173}:5173"
     environment:
diff --git a/DocSum/docker_compose/intel/hpu/gaudi/compose_tgi.yaml b/DocSum/docker_compose/intel/hpu/gaudi/compose_tgi.yaml
new file mode 100644
index 0000000000..34516be195
--- /dev/null
+++ b/DocSum/docker_compose/intel/hpu/gaudi/compose_tgi.yaml
@@ -0,0 +1,114 @@
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+services:
+  tgi-gaudi-server:
+    image: ghcr.io/huggingface/tgi-gaudi:2.3.1
+    container_name: docsum-gaudi-tgi-server
+    ports:
+      - ${LLM_ENDPOINT_PORT:-8008}:80
+    volumes:
+      - "${MODEL_CACHE}:/data"
+    environment:
+      no_proxy: ${no_proxy}
+      http_proxy: ${http_proxy}
+      https_proxy: ${https_proxy}
+      HUGGING_FACE_HUB_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
+      HF_HUB_DISABLE_PROGRESS_BARS: 1
+      HF_HUB_ENABLE_HF_TRANSFER: 0
+      HABANA_VISIBLE_DEVICES: all
+      OMPI_MCA_btl_vader_single_copy_mechanism: none
+      ENABLE_HPU_GRAPH: true
+      LIMIT_HPU_GRAPH: true
+      USE_FLASH_ATTENTION: true
+      FLASH_ATTENTION_RECOMPUTE: true
+      host_ip: ${host_ip}
+      LLM_ENDPOINT_PORT: ${LLM_ENDPOINT_PORT}
+    runtime: habana
+    cap_add:
+      - SYS_NICE
+    ipc: host
+    healthcheck:
+      test: ["CMD-SHELL", "curl -f http://${host_ip}:${LLM_ENDPOINT_PORT}/health || exit 1"]
+      interval: 10s
+      timeout: 10s
+      retries: 100
+    command: --model-id ${LLM_MODEL_ID} --max-input-length ${MAX_INPUT_TOKENS} --max-total-tokens ${MAX_TOTAL_TOKENS}
+
+  llm-docsum-tgi:
+    image: ${REGISTRY:-opea}/llm-docsum:${TAG:-latest}
+    container_name: docsum-gaudi-llm-server
+    depends_on:
+      tgi-gaudi-server:
+        condition: service_healthy
+    ports:
+      - ${LLM_PORT:-9000}:9000
+    ipc: host
+    environment:
+      no_proxy: ${no_proxy}
+      http_proxy: ${http_proxy}
+      https_proxy: ${https_proxy}
+      HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
+      MAX_INPUT_TOKENS: ${MAX_INPUT_TOKENS}
+      MAX_TOTAL_TOKENS: ${MAX_TOTAL_TOKENS}
+      LLM_ENDPOINT: ${LLM_ENDPOINT}
+      LLM_MODEL_ID: ${LLM_MODEL_ID}
+      DocSum_COMPONENT_NAME: ${DocSum_COMPONENT_NAME}
+      LOGFLAG: ${LOGFLAG:-False}
+    restart: unless-stopped
+
+  whisper:
+    image: ${REGISTRY:-opea}/whisper:${TAG:-latest}
+    container_name: docsum-gaudi-whisper-server
+    ports:
+      - "7066:7066"
+    ipc: host
+    environment:
+      no_proxy: ${no_proxy}
+      http_proxy: ${http_proxy}
+      https_proxy: ${https_proxy}
+      HABANA_VISIBLE_DEVICES: all
+      OMPI_MCA_btl_vader_single_copy_mechanism: none
+    runtime: habana
+    cap_add:
+      - SYS_NICE
+    restart: unless-stopped
+
+  docsum-gaudi-backend-server:
+    image: ${REGISTRY:-opea}/docsum:${TAG:-latest}
+    container_name: docsum-gaudi-backend-server
+    depends_on:
+      - tgi-gaudi-server
+      - llm-docsum-tgi
+    ports:
+      - "${BACKEND_SERVICE_PORT:-8888}:8888"
+    environment:
+      - no_proxy=${no_proxy}
+      - https_proxy=${https_proxy}
+      - http_proxy=${http_proxy}
+      - MEGA_SERVICE_HOST_IP=${MEGA_SERVICE_HOST_IP}
+      - LLM_SERVICE_HOST_IP=${LLM_SERVICE_HOST_IP}
+      - ASR_SERVICE_HOST_IP=${ASR_SERVICE_HOST_IP}
+
+    ipc: host
+    restart: always
+
+  docsum-gradio-ui:
+    image: ${REGISTRY:-opea}/docsum-gradio-ui:${TAG:-latest}
+    container_name: docsum-gaudi-ui-server
+    depends_on:
+      - docsum-gaudi-backend-server
+    ports:
+      - "${FRONTEND_SERVICE_PORT:-5173}:5173"
+    environment:
+      - no_proxy=${no_proxy}
+      - https_proxy=${https_proxy}
+      - http_proxy=${http_proxy}
+      - BACKEND_SERVICE_ENDPOINT=${BACKEND_SERVICE_ENDPOINT}
+      - DOC_BASE_URL=${BACKEND_SERVICE_ENDPOINT}
+    ipc: host
+    restart: always
+
+networks:
+  default:
+    driver: bridge
diff --git a/DocSum/tests/test_compose_on_gaudi.sh b/DocSum/tests/test_compose_on_gaudi.sh
index 1c8632b76d..77999ae32e 100644
--- a/DocSum/tests/test_compose_on_gaudi.sh
+++ b/DocSum/tests/test_compose_on_gaudi.sh
@@ -12,23 +12,29 @@ export host_ip=$(hostname -I | awk '{print $1}')

 echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}"
 echo "TAG=IMAGE_TAG=${IMAGE_TAG}"
+export no_proxy="${no_proxy},${host_ip}"
+export MODEL_CACHE=${model_cache:-"./data"}
 export REGISTRY=${IMAGE_REPO}
 export TAG=${IMAGE_TAG}
+export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
+export LLM_ENDPOINT_PORT=8008
+export LLM_MODEL_ID="Intel/neural-chat-7b-v3-3"
+export NUM_CARDS=1
+export BLOCK_SIZE=128
+export MAX_NUM_SEQS=256
+export MAX_SEQ_LEN_TO_CAPTURE=2048
 export MAX_INPUT_TOKENS=2048
 export MAX_TOTAL_TOKENS=4096
-export LLM_MODEL_ID="Intel/neural-chat-7b-v3-3"
-export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
+export LLM_PORT=9000
+export LLM_ENDPOINT="http://${host_ip}:${LLM_ENDPOINT_PORT}"
+export DocSum_COMPONENT_NAME="OpeaDocSumvLLM"
 export MEGA_SERVICE_HOST_IP=${host_ip}
 export LLM_SERVICE_HOST_IP=${host_ip}
 export ASR_SERVICE_HOST_IP=${host_ip}
-export BACKEND_SERVICE_ENDPOINT="http://${host_ip}:8888/v1/docsum"
-export no_proxy="${no_proxy},${host_ip}"
-export LLM_ENDPOINT_PORT=8008
-export DOCSUM_PORT=9000
-export LLM_ENDPOINT="http://${host_ip}:${LLM_ENDPOINT_PORT}"
-export DocSum_COMPONENT_NAME="OpeaDocSumTgi"
+export FRONTEND_SERVICE_PORT=5173
+export BACKEND_SERVICE_PORT=8888
+export BACKEND_SERVICE_ENDPOINT="http://${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum"
 export LOGFLAG=True
-export DATA_PATH=${model_cache:-"/data/cache"}

 WORKPATH=$(dirname "$PWD")
 LOG_PATH="$WORKPATH/tests"
@@ -39,17 +45,31 @@ ROOT_FOLDER=$(dirname "$(readlink -f "$0")")

 function build_docker_images() {
     opea_branch=${opea_branch:-"main"}
+    # If the opea_branch isn't main, replace the git clone branch in Dockerfile.
+    if [[ "${opea_branch}" != "main" ]]; then
+        cd $WORKPATH
+        OLD_STRING="RUN git clone --depth 1 https://github.com/opea-project/GenAIComps.git"
+        NEW_STRING="RUN git clone --depth 1 --branch ${opea_branch} https://github.com/opea-project/GenAIComps.git"
+        find . -type f -name "Dockerfile*" | while read -r file; do
+            echo "Processing file: $file"
+            sed -i "s|$OLD_STRING|$NEW_STRING|g" "$file"
+        done
+    fi
+
     cd $WORKPATH/docker_image_build
     git clone --depth 1 --branch ${opea_branch} https://github.com/opea-project/GenAIComps.git
     pushd GenAIComps
     docker build --no-cache -t ${REGISTRY}/comps-base:${TAG} --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile .
popd && sleep 1s + git clone https://github.com/HabanaAI/vllm-fork.git && cd vllm-fork + VLLM_VER=$(git describe --tags "$(git rev-list --tags --max-count=1)") + git checkout ${VLLM_VER} &> /dev/null && cd ../ + echo "Build all the images with --no-cache, check docker_image_build.log for details..." - service_list="docsum docsum-gradio-ui whisper llm-docsum" + service_list="docsum docsum-gradio-ui whisper llm-docsum vllm-gaudi" docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log - docker pull ghcr.io/huggingface/tgi-gaudi:2.3.1 docker images && sleep 1s } @@ -84,115 +104,90 @@ input_data_for_test() { esac } -function validate_services_json() { +function validate_service() { local URL="$1" local EXPECTED_RESULT="$2" local SERVICE_NAME="$3" local DOCKER_NAME="$4" - local INPUT_DATA="$5" - - local HTTP_STATUS=$(curl -s -o /dev/null -w "%{http_code}" -X POST -d "$INPUT_DATA" -H 'Content-Type: application/json' "$URL") - - echo "===========================================" - - if [ "$HTTP_STATUS" -eq 200 ]; then - echo "[ $SERVICE_NAME ] HTTP status is 200. Checking content..." - - local CONTENT=$(curl -s -X POST -d "$INPUT_DATA" -H 'Content-Type: application/json' "$URL" | tee ${LOG_PATH}/${SERVICE_NAME}.log) - - if echo "$CONTENT" | grep -q "$EXPECTED_RESULT"; then - echo "[ $SERVICE_NAME ] Content is as expected." - else - echo "[ $SERVICE_NAME ] Content does not match the expected result: $CONTENT" - docker logs ${DOCKER_NAME} >> ${LOG_PATH}/${SERVICE_NAME}.log - exit 1 - fi + local VALIDATE_TYPE="$5" + local INPUT_DATA="$6" + local FORM_DATA1="$7" + local FORM_DATA2="$8" + local FORM_DATA3="$9" + local FORM_DATA4="${10}" + local FORM_DATA5="${11}" + + if [[ $VALIDATE_TYPE == *"json"* ]]; then + HTTP_RESPONSE=$(curl --silent --write-out "HTTPSTATUS:%{http_code}" -X POST -d "$INPUT_DATA" -H 'Content-Type: application/json' "$URL") else - echo "[ $SERVICE_NAME ] HTTP status is not 200. Received status was $HTTP_STATUS" - docker logs ${DOCKER_NAME} >> ${LOG_PATH}/${SERVICE_NAME}.log - exit 1 + HTTP_RESPONSE=$(curl --silent --write-out "HTTPSTATUS:%{http_code}" -X POST -F "$FORM_DATA1" -F "$FORM_DATA2" -F "$FORM_DATA3" -F "$FORM_DATA4" -F "$FORM_DATA5" -H 'Content-Type: multipart/form-data' "$URL") fi - sleep 1s -} + HTTP_STATUS=$(echo $HTTP_RESPONSE | tr -d '\n' | sed -e 's/.*HTTPSTATUS://') + RESPONSE_BODY=$(echo $HTTP_RESPONSE | sed -e 's/HTTPSTATUS\:.*//g') -function validate_services_form() { - local URL="$1" - local EXPECTED_RESULT="$2" - local SERVICE_NAME="$3" - local DOCKER_NAME="$4" - local FORM_DATA1="$5" - local FORM_DATA2="$6" - local FORM_DATA3="$7" - local FORM_DATA4="$8" - local FORM_DATA5="$9" - - local HTTP_STATUS=$(curl -s -o /dev/null -w "%{http_code}" -X POST -F "$FORM_DATA1" -F "$FORM_DATA2" -F "$FORM_DATA3" -F "$FORM_DATA4" -F "$FORM_DATA5" -H 'Content-Type: multipart/form-data' "$URL") - - if [ "$HTTP_STATUS" -eq 200 ]; then - echo "[ $SERVICE_NAME ] HTTP status is 200. Checking content..." + docker logs ${DOCKER_NAME} >> ${LOG_PATH}/${SERVICE_NAME}.log - local CONTENT=$(curl -s -X POST -F "$FORM_DATA1" -F "$FORM_DATA2" -F "$FORM_DATA3" -F "$FORM_DATA4" -F "$FORM_DATA5" -H 'Content-Type: multipart/form-data' "$URL" | tee ${LOG_PATH}/${SERVICE_NAME}.log) - - if echo "$CONTENT" | grep -q "$EXPECTED_RESULT"; then - echo "[ $SERVICE_NAME ] Content is as expected." 
- else - echo "[ $SERVICE_NAME ] Content does not match the expected result: $CONTENT" - docker logs ${DOCKER_NAME} >> ${LOG_PATH}/${SERVICE_NAME}.log - exit 1 - fi - else + # check response status + if [ "$HTTP_STATUS" -ne "200" ]; then echo "[ $SERVICE_NAME ] HTTP status is not 200. Received status was $HTTP_STATUS" - docker logs ${DOCKER_NAME} >> ${LOG_PATH}/${SERVICE_NAME}.log exit 1 + else + echo "[ $SERVICE_NAME ] HTTP status is 200. Checking content..." + fi + # check response body + if [[ "$RESPONSE_BODY" != *"$EXPECTED_RESULT"* ]]; then + echo "EXPECTED_RESULT==> $EXPECTED_RESULT" + echo "RESPONSE_BODY==> $RESPONSE_BODY" + echo "[ $SERVICE_NAME ] Content does not match the expected result: $RESPONSE_BODY" + exit 1 + else + echo "[ $SERVICE_NAME ] Content is as expected." fi + sleep 1s } function validate_microservices() { # Check if the microservices are running correctly. - # tgi for llm service - validate_services_json \ - "${host_ip}:8008/generate" \ - "generated_text" \ - "tgi-gaudi-server" \ - "tgi-gaudi-server" \ - '{"inputs":"What is Deep Learning?","parameters":{"max_new_tokens":17, "do_sample": true}}' - # llm microservice - validate_services_json \ - "${host_ip}:9000/v1/docsum" \ + validate_service \ + "${host_ip}:${LLM_PORT}/v1/docsum" \ "text" \ - "llm-docsum-tgi" \ - "llm-docsum-gaudi-server" \ + "llm-docsum-vllm" \ + "docsum-gaudi-llm-server" \ + "json" \ '{"messages":"Text Embeddings Inference (TEI) is a toolkit for deploying and serving open source text embeddings and sequence classification models. TEI enables high-performance extraction for the most popular models, including FlagEmbedding, Ember, GTE and E5."}' # whisper microservice ulimit -s 65536 - validate_services_json \ + validate_service \ "${host_ip}:7066/v1/asr" \ '{"asr_result":"well"}' \ "whisper" \ - "whisper-server" \ + "docsum-gaudi-whisper-server" \ + "json" \ "{\"audio\": \"$(input_data_for_test "audio")\"}" } function validate_megaservice_text() { echo ">>> Checking text data in json format" - validate_services_json \ - "${host_ip}:8888/v1/docsum" \ + validate_service \ + "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \ "[DONE]" \ "docsum-gaudi-backend-server" \ "docsum-gaudi-backend-server" \ + "json" \ '{"type": "text", "messages": "Text Embeddings Inference (TEI) is a toolkit for deploying and serving open source text embeddings and sequence classification models. TEI enables high-performance extraction for the most popular models, including FlagEmbedding, Ember, GTE and E5."}' echo ">>> Checking text data in form format, set language=en" - validate_services_form \ - "${host_ip}:8888/v1/docsum" \ + validate_service \ + "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \ "[DONE]" \ "docsum-gaudi-backend-server" \ "docsum-gaudi-backend-server" \ + "media" "" \ "type=text" \ "messages=Text Embeddings Inference (TEI) is a toolkit for deploying and serving open source text embeddings and sequence classification models. TEI enables high-performance extraction for the most popular models, including FlagEmbedding, Ember, GTE and E5." 
\ "max_tokens=32" \ @@ -200,11 +195,12 @@ function validate_megaservice_text() { "stream=True" echo ">>> Checking text data in form format, set language=zh" - validate_services_form \ - "${host_ip}:8888/v1/docsum" \ + validate_service \ + "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \ "[DONE]" \ "docsum-gaudi-backend-server" \ "docsum-gaudi-backend-server" \ + "media" "" \ "type=text" \ "messages=2024年9月26日,北京——今日,英特尔正式发布英特尔® 至强® 6性能核处理器(代号Granite Rapids),为AI、数据分析、科学计算等计算密集型业务提供卓越性能。" \ "max_tokens=32" \ @@ -212,11 +208,12 @@ function validate_megaservice_text() { "stream=True" echo ">>> Checking text data in form format, upload file" - validate_services_form \ - "${host_ip}:8888/v1/docsum" \ + validate_service \ + "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \ "[DONE]" \ "docsum-gaudi-backend-server" \ "docsum-gaudi-backend-server" \ + "media" "" \ "type=text" \ "messages=" \ "files=@$ROOT_FOLDER/data/short.txt" \ @@ -226,19 +223,21 @@ function validate_megaservice_text() { function validate_megaservice_multimedia() { echo ">>> Checking audio data in json format" - validate_services_json \ - "${host_ip}:8888/v1/docsum" \ + validate_service \ + "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \ "[DONE]" \ "docsum-gaudi-backend-server" \ "docsum-gaudi-backend-server" \ + "json" \ "{\"type\": \"audio\", \"messages\": \"$(input_data_for_test "audio")\"}" echo ">>> Checking audio data in form format" - validate_services_form \ - "${host_ip}:8888/v1/docsum" \ + validate_service \ + "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \ "[DONE]" \ "docsum-gaudi-backend-server" \ "docsum-gaudi-backend-server" \ + "media" "" \ "type=audio" \ "messages=UklGRigAAABXQVZFZm10IBIAAAABAAEARKwAAIhYAQACABAAAABkYXRhAgAAAAEA" \ "max_tokens=32" \ @@ -246,19 +245,21 @@ function validate_megaservice_multimedia() { "stream=True" echo ">>> Checking video data in json format" - validate_services_json \ - "${host_ip}:8888/v1/docsum" \ + validate_service \ + "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \ "[DONE]" \ "docsum-gaudi-backend-server" \ "docsum-gaudi-backend-server" \ + "json" \ "{\"type\": \"video\", \"messages\": \"$(input_data_for_test "video")\"}" echo ">>> Checking video data in form format" - validate_services_form \ - "${host_ip}:8888/v1/docsum" \ + validate_service \ + "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \ "[DONE]" \ "docsum-gaudi-backend-server" \ "docsum-gaudi-backend-server" \ + "media" "" \ "type=video" \ "messages=\"$(input_data_for_test "video")\"" \ "max_tokens=32" \ @@ -268,11 +269,12 @@ function validate_megaservice_multimedia() { function validate_megaservice_long_text() { echo ">>> Checking long text data in form format, set summary_type=auto" - validate_services_form \ - "${host_ip}:8888/v1/docsum" \ + validate_service \ + "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \ "[DONE]" \ "docsum-gaudi-backend-server" \ "docsum-gaudi-backend-server" \ + "media" "" \ "type=text" \ "messages=" \ "files=@$ROOT_FOLDER/data/long.txt" \ @@ -280,11 +282,12 @@ function validate_megaservice_long_text() { "summary_type=auto" echo ">>> Checking long text data in form format, set summary_type=stuff" - validate_services_form \ - "${host_ip}:8888/v1/docsum" \ + validate_service \ + "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \ "[DONE]" \ "docsum-gaudi-backend-server" \ "docsum-gaudi-backend-server" \ + "media" "" \ "type=text" \ "messages=" \ "files=@$ROOT_FOLDER/data/long.txt" \ @@ -292,11 +295,12 @@ function validate_megaservice_long_text() { "summary_type=stuff" echo ">>> Checking long text 
data in form format, set summary_type=truncate" - validate_services_form \ - "${host_ip}:8888/v1/docsum" \ + validate_service \ + "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \ "[DONE]" \ "docsum-gaudi-backend-server" \ "docsum-gaudi-backend-server" \ + "media" "" \ "type=text" \ "messages=" \ "files=@$ROOT_FOLDER/data/long.txt" \ @@ -304,11 +308,12 @@ function validate_megaservice_long_text() { "summary_type=truncate" echo ">>> Checking long text data in form format, set summary_type=map_reduce" - validate_services_form \ - "${host_ip}:8888/v1/docsum" \ + validate_service \ + "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \ "[DONE]" \ "docsum-gaudi-backend-server" \ "docsum-gaudi-backend-server" \ + "media" "" \ "type=text" \ "messages=" \ "files=@$ROOT_FOLDER/data/long.txt" \ @@ -316,11 +321,12 @@ function validate_megaservice_long_text() { "summary_type=map_reduce" echo ">>> Checking long text data in form format, set summary_type=refine" - validate_services_form \ - "${host_ip}:8888/v1/docsum" \ + validate_service \ + "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \ "[DONE]" \ "docsum-gaudi-backend-server" \ "docsum-gaudi-backend-server" \ + "media" "" \ "type=text" \ "messages=" \ "files=@$ROOT_FOLDER/data/long.txt" \ @@ -330,7 +336,7 @@ function validate_megaservice_long_text() { function stop_docker() { cd $WORKPATH/docker_compose/intel/hpu/gaudi - docker compose stop && docker compose rm -f + docker compose -f compose.yaml stop && docker compose rm -f } function main() { diff --git a/DocSum/tests/test_compose_on_xeon.sh b/DocSum/tests/test_compose_on_xeon.sh index 8ef7969a14..4ebb8d3339 100644 --- a/DocSum/tests/test_compose_on_xeon.sh +++ b/DocSum/tests/test_compose_on_xeon.sh @@ -21,7 +21,7 @@ export LLM_ENDPOINT_PORT=8008 export LLM_MODEL_ID="Intel/neural-chat-7b-v3-3" export MAX_INPUT_TOKENS=2048 export MAX_TOTAL_TOKENS=4096 -export DOCSUM_PORT=9000 +export LLM_PORT=9000 export LLM_ENDPOINT="http://${host_ip}:${LLM_ENDPOINT_PORT}" export DocSum_COMPONENT_NAME="OpeaDocSumvLLM" export MEGA_SERVICE_HOST_IP=${host_ip} @@ -111,8 +111,8 @@ function validate_service() { local FORM_DATA1="$7" local FORM_DATA2="$8" local FORM_DATA3="$9" - local FORM_DATA4="$10" - local FORM_DATA5="$11" + local FORM_DATA4="${10}" + local FORM_DATA5="${11}" if [[ $VALIDATE_TYPE == *"json"* ]]; then HTTP_RESPONSE=$(curl --silent --write-out "HTTPSTATUS:%{http_code}" -X POST -d "$INPUT_DATA" -H 'Content-Type: application/json' "$URL") @@ -147,20 +147,11 @@ function validate_service() { function validate_microservices() { # Check if the microservices are running correctly. - # tgi for llm service - validate_service \ - "${host_ip}:${LLM_ENDPOINT_PORT}/generate" \ - "generated_text" \ - "tgi-server" \ - "docsum-xeon-tgi-server" \ - "json" \ - '{"inputs":"What is Deep Learning?","parameters":{"max_new_tokens":17, "do_sample": true}}' - # llm microservice validate_service \ "${host_ip}:${LLM_PORT}/v1/docsum" \ "text" \ - "llm-docsum-tgi" \ + "llm-docsum-vllm" \ "docsum-xeon-llm-server" \ "json" \ '{"messages":"Text Embeddings Inference (TEI) is a toolkit for deploying and serving open source text embeddings and sequence classification models. 
TEI enables high-performance extraction for the most popular models, including FlagEmbedding, Ember, GTE and E5."}' diff --git a/DocSum/tests/test_compose_tgi_on_gaudi.sh b/DocSum/tests/test_compose_tgi_on_gaudi.sh new file mode 100644 index 0000000000..94ab0d4a7f --- /dev/null +++ b/DocSum/tests/test_compose_tgi_on_gaudi.sh @@ -0,0 +1,375 @@ +#!/bin/bash +# Copyright (C) 2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +set -xe + +IMAGE_REPO=${IMAGE_REPO:-"opea"} +IMAGE_TAG=${IMAGE_TAG:-"latest"} +export http_proxy=$http_proxy +export https_proxy=$https_proxy +export host_ip=$(hostname -I | awk '{print $1}') + +echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}" +echo "TAG=IMAGE_TAG=${IMAGE_TAG}" +export no_proxy="${no_proxy},${host_ip}" +export MODEL_CACHE=${model_cache:-"./data"} +export REGISTRY=${IMAGE_REPO} +export TAG=${IMAGE_TAG} +export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN} +export LLM_ENDPOINT_PORT=8008 +export LLM_MODEL_ID="Intel/neural-chat-7b-v3-3" +export MAX_INPUT_TOKENS=2048 +export MAX_TOTAL_TOKENS=4096 +export LLM_PORT=9000 +export LLM_ENDPOINT="http://${host_ip}:${LLM_ENDPOINT_PORT}" +export DocSum_COMPONENT_NAME="OpeaDocSumTgi" +export MEGA_SERVICE_HOST_IP=${host_ip} +export LLM_SERVICE_HOST_IP=${host_ip} +export ASR_SERVICE_HOST_IP=${host_ip} +export FRONTEND_SERVICE_PORT=5173 +export BACKEND_SERVICE_PORT=8888 +export BACKEND_SERVICE_ENDPOINT="http://${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" +export LOGFLAG=True + +WORKPATH=$(dirname "$PWD") +LOG_PATH="$WORKPATH/tests" + + +# Get the root folder of the current script +ROOT_FOLDER=$(dirname "$(readlink -f "$0")") + +function build_docker_images() { + opea_branch=${opea_branch:-"main"} + cd $WORKPATH/docker_image_build + git clone --depth 1 --branch ${opea_branch} https://github.com/opea-project/GenAIComps.git + pushd GenAIComps + docker build --no-cache -t ${REGISTRY}/comps-base:${TAG} --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile . + popd && sleep 1s + + echo "Build all the images with --no-cache, check docker_image_build.log for details..." 
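+    # Note: unlike the vLLM flavor of this test, no LLM serving image is built here; the prebuilt ghcr.io/huggingface/tgi-gaudi image is pulled a few lines below instead, which is why vllm-gaudi is absent from the service list.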
+ service_list="docsum docsum-gradio-ui whisper llm-docsum" + docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log + + docker pull ghcr.io/huggingface/tgi-gaudi:2.3.1 + docker images && sleep 1s +} + +function start_services() { + cd $WORKPATH/docker_compose/intel/hpu/gaudi + docker compose -f compose_tgi.yaml up -d > ${LOG_PATH}/start_services_with_compose.log + sleep 1m +} + +get_base64_str() { + local file_name=$1 + base64 -w 0 "$file_name" +} + +# Function to generate input data for testing based on the document type +input_data_for_test() { + local document_type=$1 + case $document_type in + ("text") + echo "THIS IS A TEST >>>> and a number of states are starting to adopt them voluntarily special correspondent john delenco of education week reports it takes just 10 minutes to cross through gillette wyoming this small city sits in the northeast corner of the state surrounded by 100s of miles of prairie but schools here in campbell county are on the edge of something big the next generation science standards you are going to build a strand of dna and you are going to decode it and figure out what that dna actually says for christy mathis at sage valley junior high school the new standards are about learning to think like a scientist there is a lot of really good stuff in them every standard is a performance task it is not you know the child needs to memorize these things it is the student needs to be able to do some pretty intense stuff we are analyzing we are critiquing we are." + ;; + ("audio") + get_base64_str "$ROOT_FOLDER/data/test.wav" + ;; + ("video") + get_base64_str "$ROOT_FOLDER/data/test.mp4" + ;; + (*) + echo "Invalid document type" >&2 + exit 1 + ;; + esac +} + +function validate_service() { + local URL="$1" + local EXPECTED_RESULT="$2" + local SERVICE_NAME="$3" + local DOCKER_NAME="$4" + local VALIDATE_TYPE="$5" + local INPUT_DATA="$6" + local FORM_DATA1="$7" + local FORM_DATA2="$8" + local FORM_DATA3="$9" + local FORM_DATA4="${10}" + local FORM_DATA5="${11}" + + if [[ $VALIDATE_TYPE == *"json"* ]]; then + HTTP_RESPONSE=$(curl --silent --write-out "HTTPSTATUS:%{http_code}" -X POST -d "$INPUT_DATA" -H 'Content-Type: application/json' "$URL") + else + HTTP_RESPONSE=$(curl --silent --write-out "HTTPSTATUS:%{http_code}" -X POST -F "$FORM_DATA1" -F "$FORM_DATA2" -F "$FORM_DATA3" -F "$FORM_DATA4" -F "$FORM_DATA5" -H 'Content-Type: multipart/form-data' "$URL") + fi + HTTP_STATUS=$(echo $HTTP_RESPONSE | tr -d '\n' | sed -e 's/.*HTTPSTATUS://') + RESPONSE_BODY=$(echo $HTTP_RESPONSE | sed -e 's/HTTPSTATUS\:.*//g') + + docker logs ${DOCKER_NAME} >> ${LOG_PATH}/${SERVICE_NAME}.log + + # check response status + if [ "$HTTP_STATUS" -ne "200" ]; then + echo "[ $SERVICE_NAME ] HTTP status is not 200. Received status was $HTTP_STATUS" + exit 1 + else + echo "[ $SERVICE_NAME ] HTTP status is 200. Checking content..." + fi + # check response body + if [[ "$RESPONSE_BODY" != *"$EXPECTED_RESULT"* ]]; then + echo "EXPECTED_RESULT==> $EXPECTED_RESULT" + echo "RESPONSE_BODY==> $RESPONSE_BODY" + echo "[ $SERVICE_NAME ] Content does not match the expected result: $RESPONSE_BODY" + exit 1 + else + echo "[ $SERVICE_NAME ] Content is as expected." + fi + + sleep 1s +} + +function validate_microservices() { + # Check if the microservices are running correctly. 
+ + # tgi for llm service + validate_service \ + "${host_ip}:${LLM_ENDPOINT_PORT}/generate" \ + "generated_text" \ + "tgi-server" \ + "docsum-gaudi-tgi-server" \ + "json" \ + '{"inputs":"What is Deep Learning?","parameters":{"max_new_tokens":17, "do_sample": true}}' + + # llm microservice + validate_service \ + "${host_ip}:${LLM_PORT}/v1/docsum" \ + "text" \ + "llm-docsum-tgi" \ + "docsum-gaudi-llm-server" \ + "json" \ + '{"messages":"Text Embeddings Inference (TEI) is a toolkit for deploying and serving open source text embeddings and sequence classification models. TEI enables high-performance extraction for the most popular models, including FlagEmbedding, Ember, GTE and E5."}' + + # whisper microservice + ulimit -s 65536 + validate_service \ + "${host_ip}:7066/v1/asr" \ + '{"asr_result":"well"}' \ + "whisper" \ + "docsum-gaudi-whisper-server" \ + "json" \ + "{\"audio\": \"$(input_data_for_test "audio")\"}" + +} + +function validate_megaservice_text() { + echo ">>> Checking text data in json format" + validate_service \ + "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \ + "[DONE]" \ + "docsum-gaudi-backend-server" \ + "docsum-gaudi-backend-server" \ + "json" \ + '{"type": "text", "messages": "Text Embeddings Inference (TEI) is a toolkit for deploying and serving open source text embeddings and sequence classification models. TEI enables high-performance extraction for the most popular models, including FlagEmbedding, Ember, GTE and E5."}' + + echo ">>> Checking text data in form format, set language=en" + validate_service \ + "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \ + "[DONE]" \ + "docsum-gaudi-backend-server" \ + "docsum-gaudi-backend-server" \ + "media" "" \ + "type=text" \ + "messages=Text Embeddings Inference (TEI) is a toolkit for deploying and serving open source text embeddings and sequence classification models. TEI enables high-performance extraction for the most popular models, including FlagEmbedding, Ember, GTE and E5." 
\ + "max_tokens=32" \ + "language=en" \ + "stream=True" + + echo ">>> Checking text data in form format, set language=zh" + validate_service \ + "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \ + "[DONE]" \ + "docsum-gaudi-backend-server" \ + "docsum-gaudi-backend-server" \ + "media" "" \ + "type=text" \ + "messages=2024年9月26日,北京——今日,英特尔正式发布英特尔® 至强® 6性能核处理器(代号Granite Rapids),为AI、数据分析、科学计算等计算密集型业务提供卓越性能。" \ + "max_tokens=32" \ + "language=zh" \ + "stream=True" + + echo ">>> Checking text data in form format, upload file" + validate_service \ + "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \ + "[DONE]" \ + "docsum-gaudi-backend-server" \ + "docsum-gaudi-backend-server" \ + "media" "" \ + "type=text" \ + "messages=" \ + "files=@$ROOT_FOLDER/data/short.txt" \ + "max_tokens=32" \ + "language=en" +} + +function validate_megaservice_multimedia() { + echo ">>> Checking audio data in json format" + validate_service \ + "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \ + "[DONE]" \ + "docsum-gaudi-backend-server" \ + "docsum-gaudi-backend-server" \ + "json" \ + "{\"type\": \"audio\", \"messages\": \"$(input_data_for_test "audio")\"}" + + echo ">>> Checking audio data in form format" + validate_service \ + "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \ + "[DONE]" \ + "docsum-gaudi-backend-server" \ + "docsum-gaudi-backend-server" \ + "media" "" \ + "type=audio" \ + "messages=UklGRigAAABXQVZFZm10IBIAAAABAAEARKwAAIhYAQACABAAAABkYXRhAgAAAAEA" \ + "max_tokens=32" \ + "language=en" \ + "stream=True" + + echo ">>> Checking video data in json format" + validate_service \ + "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \ + "[DONE]" \ + "docsum-gaudi-backend-server" \ + "docsum-gaudi-backend-server" \ + "json" \ + "{\"type\": \"video\", \"messages\": \"$(input_data_for_test "video")\"}" + + echo ">>> Checking video data in form format" + validate_service \ + "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \ + "[DONE]" \ + "docsum-gaudi-backend-server" \ + "docsum-gaudi-backend-server" \ + "media" "" \ + "type=video" \ + "messages=\"$(input_data_for_test "video")\"" \ + "max_tokens=32" \ + "language=en" \ + "stream=True" +} + +function validate_megaservice_long_text() { + echo ">>> Checking long text data in form format, set summary_type=auto" + validate_service \ + "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \ + "[DONE]" \ + "docsum-gaudi-backend-server" \ + "docsum-gaudi-backend-server" \ + "media" "" \ + "type=text" \ + "messages=" \ + "files=@$ROOT_FOLDER/data/long.txt" \ + "max_tokens=128" \ + "summary_type=auto" + + echo ">>> Checking long text data in form format, set summary_type=stuff" + validate_service \ + "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \ + "[DONE]" \ + "docsum-gaudi-backend-server" \ + "docsum-gaudi-backend-server" \ + "media" "" \ + "type=text" \ + "messages=" \ + "files=@$ROOT_FOLDER/data/long.txt" \ + "max_tokens=128" \ + "summary_type=stuff" + + echo ">>> Checking long text data in form format, set summary_type=truncate" + validate_service \ + "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \ + "[DONE]" \ + "docsum-gaudi-backend-server" \ + "docsum-gaudi-backend-server" \ + "media" "" \ + "type=text" \ + "messages=" \ + "files=@$ROOT_FOLDER/data/long.txt" \ + "max_tokens=128" \ + "summary_type=truncate" + + echo ">>> Checking long text data in form format, set summary_type=map_reduce" + validate_service \ + "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \ + "[DONE]" \ + "docsum-gaudi-backend-server" \ + "docsum-gaudi-backend-server" \ + "media" "" \ + "type=text" \ + 
"messages=" \ + "files=@$ROOT_FOLDER/data/long.txt" \ + "max_tokens=128" \ + "summary_type=map_reduce" + + echo ">>> Checking long text data in form format, set summary_type=refine" + validate_service \ + "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \ + "[DONE]" \ + "docsum-gaudi-backend-server" \ + "docsum-gaudi-backend-server" \ + "media" "" \ + "type=text" \ + "messages=" \ + "files=@$ROOT_FOLDER/data/long.txt" \ + "max_tokens=128" \ + "summary_type=refine" +} + +function stop_docker() { + cd $WORKPATH/docker_compose/intel/hpu/gaudi + docker compose -f compose_tgi.yaml stop && docker compose rm -f +} + +function main() { + echo "===========================================" + echo ">>>> Stopping any running Docker containers..." + stop_docker + + echo "===========================================" + if [[ "$IMAGE_REPO" == "opea" ]]; then + echo ">>>> Building Docker images..." + build_docker_images + fi + + echo "===========================================" + echo ">>>> Starting Docker services..." + start_services + + echo "===========================================" + echo ">>>> Validating microservices..." + validate_microservices + + echo "===========================================" + echo ">>>> Validating megaservice for text..." + validate_megaservice_text + + echo "===========================================" + echo ">>>> Validating megaservice for multimedia..." + validate_megaservice_multimedia + + echo "===========================================" + echo ">>>> Validating megaservice for long text..." + validate_megaservice_long_text + + echo "===========================================" + echo ">>>> Stopping Docker containers..." + stop_docker + + echo "===========================================" + echo ">>>> Pruning Docker system..." + echo y | docker system prune + echo ">>>> Docker system pruned successfully." + echo "===========================================" +} + +main diff --git a/DocSum/tests/test_compose_tgi_on_xeon.sh b/DocSum/tests/test_compose_tgi_on_xeon.sh index cc42644a45..9d1f5bebc8 100644 --- a/DocSum/tests/test_compose_tgi_on_xeon.sh +++ b/DocSum/tests/test_compose_tgi_on_xeon.sh @@ -95,8 +95,8 @@ function validate_service() { local FORM_DATA1="$7" local FORM_DATA2="$8" local FORM_DATA3="$9" - local FORM_DATA4="$10" - local FORM_DATA5="$11" + local FORM_DATA4="${10}" + local FORM_DATA5="${11}" if [[ $VALIDATE_TYPE == *"json"* ]]; then HTTP_RESPONSE=$(curl --silent --write-out "HTTPSTATUS:%{http_code}" -X POST -d "$INPUT_DATA" -H 'Content-Type: application/json' "$URL") From 5cc148d1fb40a32d372522d9c544f1f585eb4444 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 25 Mar 2025 05:47:16 +0000 Subject: [PATCH 04/12] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- DocSum/docker_compose/intel/cpu/xeon/README.md | 3 +++ DocSum/docker_compose/intel/hpu/gaudi/README.md | 3 +++ DocSum/docker_compose/intel/hpu/gaudi/compose.yaml | 2 +- 3 files changed, 7 insertions(+), 1 deletion(-) diff --git a/DocSum/docker_compose/intel/cpu/xeon/README.md b/DocSum/docker_compose/intel/cpu/xeon/README.md index d367b20e38..2aacacafbb 100644 --- a/DocSum/docker_compose/intel/cpu/xeon/README.md +++ b/DocSum/docker_compose/intel/cpu/xeon/README.md @@ -145,18 +145,21 @@ You will have the following Docker Images: In the first startup, this service will take more time to download, load and warm up the model. After it's finished, the service will be ready. 
Try the command below to check whether the LLM serving is ready. + ```bash # vLLM service docker logs docsum-xeon-vllm-service 2>&1 | grep complete # If the service is ready, you will get the response like below. INFO: Application startup complete. ``` + ```bash # TGI service docker logs docsum-xeon-tgi-server | grep Connected # If the service is ready, you will get the response like below. 2024-09-03T02:47:53.402023Z INFO text_generation_router::server: router/src/server.rs:2311: Connected ``` + Then try the `cURL` command below to validate services. ```bash
diff --git a/DocSum/docker_compose/intel/hpu/gaudi/README.md b/DocSum/docker_compose/intel/hpu/gaudi/README.md index 4348d6b303..e2dc4c3da7 100644 --- a/DocSum/docker_compose/intel/hpu/gaudi/README.md +++ b/DocSum/docker_compose/intel/hpu/gaudi/README.md @@ -137,18 +137,21 @@ You will have the following Docker Images: In the first startup, this service will take more time to download, load and warm up the model. After it's finished, the service will be ready. Try the command below to check whether the LLM serving is ready. + ```bash # vLLM service docker logs docsum-gaudi-vllm-service 2>&1 | grep complete # If the service is ready, you will get the response like below. INFO: Application startup complete. ``` + ```bash # TGI service docker logs docsum-gaudi-tgi-server | grep Connected # If the service is ready, you will get the response like below. 2024-09-03T02:47:53.402023Z INFO text_generation_router::server: router/src/server.rs:2311: Connected ``` + Then try the `cURL` command below to validate services. ```bash
diff --git a/DocSum/docker_compose/intel/hpu/gaudi/compose.yaml b/DocSum/docker_compose/intel/hpu/gaudi/compose.yaml index 6265206e9c..7a0a104af9 100644 --- a/DocSum/docker_compose/intel/hpu/gaudi/compose.yaml +++ b/DocSum/docker_compose/intel/hpu/gaudi/compose.yaml @@ -29,7 +29,7 @@ services: - SYS_NICE ipc: host command: --model $LLM_MODEL_ID --tensor-parallel-size ${NUM_CARDS} --host 0.0.0.0 --port 80 --block-size ${BLOCK_SIZE} --max-num-seqs ${MAX_NUM_SEQS} --max-seq_len-to-capture ${MAX_SEQ_LEN_TO_CAPTURE} - + llm-docsum-vllm: image: ${REGISTRY:-opea}/llm-docsum:${TAG:-latest} container_name: docsum-gaudi-llm-server
From 42a9df0d8a543bee550f8891a786e291cad7fd35 Mon Sep 17 00:00:00 2001 From: letonghan Date: Tue, 25 Mar 2025 14:03:00 +0800 Subject: [PATCH 05/12] update expected results of long text tests Signed-off-by: letonghan --- DocSum/tests/test_compose_on_gaudi.sh | 10 +++++----- DocSum/tests/test_compose_on_xeon.sh | 10 +++++----- DocSum/tests/test_compose_tgi_on_gaudi.sh | 10 +++++----- DocSum/tests/test_compose_tgi_on_xeon.sh | 10 +++++----- 4 files changed, 20 insertions(+), 20 deletions(-) diff --git a/DocSum/tests/test_compose_on_gaudi.sh b/DocSum/tests/test_compose_on_gaudi.sh index 77999ae32e..8c4458b67a 100644 --- a/DocSum/tests/test_compose_on_gaudi.sh +++ b/DocSum/tests/test_compose_on_gaudi.sh @@ -271,7 +271,7 @@ function validate_megaservice_long_text() { echo ">>> Checking long text data in form format, set summary_type=auto" validate_service \ "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \ - "[DONE]" \ + "Intel" \ "docsum-gaudi-backend-server" \ "docsum-gaudi-backend-server" \ "media" "" \ @@ -284,7 +284,7 @@ function validate_megaservice_long_text() { echo ">>> Checking long text data in form format, set summary_type=stuff" validate_service \ "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \ - "[DONE]" \ + "Intel" \ "docsum-gaudi-backend-server" \ "docsum-gaudi-backend-server" \ "media" "" \ @@
-297,7 +297,7 @@ function validate_megaservice_long_text() { echo ">>> Checking long text data in form format, set summary_type=truncate" validate_service \ "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \ - "[DONE]" \ + "Intel" \ "docsum-gaudi-backend-server" \ "docsum-gaudi-backend-server" \ "media" "" \ @@ -310,7 +310,7 @@ function validate_megaservice_long_text() { echo ">>> Checking long text data in form format, set summary_type=map_reduce" validate_service \ "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \ - "[DONE]" \ + "Intel" \ "docsum-gaudi-backend-server" \ "docsum-gaudi-backend-server" \ "media" "" \ @@ -323,7 +323,7 @@ function validate_megaservice_long_text() { echo ">>> Checking long text data in form format, set summary_type=refine" validate_service \ "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \ - "[DONE]" \ + "Intel" \ "docsum-gaudi-backend-server" \ "docsum-gaudi-backend-server" \ "media" "" \ diff --git a/DocSum/tests/test_compose_on_xeon.sh b/DocSum/tests/test_compose_on_xeon.sh index 4ebb8d3339..ed9cb52ca7 100644 --- a/DocSum/tests/test_compose_on_xeon.sh +++ b/DocSum/tests/test_compose_on_xeon.sh @@ -268,7 +268,7 @@ function validate_megaservice_long_text() { echo ">>> Checking long text data in form format, set summary_type=auto" validate_service \ "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \ - "[DONE]" \ + "Intel" \ "docsum-xeon-backend-server" \ "docsum-xeon-backend-server" \ "media" "" \ @@ -281,7 +281,7 @@ function validate_megaservice_long_text() { echo ">>> Checking long text data in form format, set summary_type=stuff" validate_service \ "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \ - "[DONE]" \ + "Intel" \ "docsum-xeon-backend-server" \ "docsum-xeon-backend-server" \ "media" "" \ @@ -294,7 +294,7 @@ function validate_megaservice_long_text() { echo ">>> Checking long text data in form format, set summary_type=truncate" validate_service \ "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \ - "[DONE]" \ + "Intel" \ "docsum-xeon-backend-server" \ "docsum-xeon-backend-server" \ "media" "" \ @@ -307,7 +307,7 @@ function validate_megaservice_long_text() { echo ">>> Checking long text data in form format, set summary_type=map_reduce" validate_service \ "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \ - "[DONE]" \ + "Intel" \ "docsum-xeon-backend-server" \ "docsum-xeon-backend-server" \ "media" "" \ @@ -320,7 +320,7 @@ function validate_megaservice_long_text() { echo ">>> Checking long text data in form format, set summary_type=refine" validate_service \ "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \ - "[DONE]" \ + "Intel" \ "docsum-xeon-backend-server" \ "docsum-xeon-backend-server" \ "media" "" \ diff --git a/DocSum/tests/test_compose_tgi_on_gaudi.sh b/DocSum/tests/test_compose_tgi_on_gaudi.sh index 94ab0d4a7f..e62fa483c2 100644 --- a/DocSum/tests/test_compose_tgi_on_gaudi.sh +++ b/DocSum/tests/test_compose_tgi_on_gaudi.sh @@ -262,7 +262,7 @@ function validate_megaservice_long_text() { echo ">>> Checking long text data in form format, set summary_type=auto" validate_service \ "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \ - "[DONE]" \ + "Intel" \ "docsum-gaudi-backend-server" \ "docsum-gaudi-backend-server" \ "media" "" \ @@ -275,7 +275,7 @@ function validate_megaservice_long_text() { echo ">>> Checking long text data in form format, set summary_type=stuff" validate_service \ "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \ - "[DONE]" \ + "Intel" \ "docsum-gaudi-backend-server" \ "docsum-gaudi-backend-server" \ "media" "" \ @@ -288,7 +288,7 @@ function 
validate_megaservice_long_text() { echo ">>> Checking long text data in form format, set summary_type=truncate" validate_service \ "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \ - "[DONE]" \ + "Intel" \ "docsum-gaudi-backend-server" \ "docsum-gaudi-backend-server" \ "media" "" \ @@ -301,7 +301,7 @@ function validate_megaservice_long_text() { echo ">>> Checking long text data in form format, set summary_type=map_reduce" validate_service \ "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \ - "[DONE]" \ + "Intel" \ "docsum-gaudi-backend-server" \ "docsum-gaudi-backend-server" \ "media" "" \ @@ -314,7 +314,7 @@ function validate_megaservice_long_text() { echo ">>> Checking long text data in form format, set summary_type=refine" validate_service \ "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \ - "[DONE]" \ + "Intel" \ "docsum-gaudi-backend-server" \ "docsum-gaudi-backend-server" \ "media" "" \ diff --git a/DocSum/tests/test_compose_tgi_on_xeon.sh b/DocSum/tests/test_compose_tgi_on_xeon.sh index 9d1f5bebc8..86b4809da3 100644 --- a/DocSum/tests/test_compose_tgi_on_xeon.sh +++ b/DocSum/tests/test_compose_tgi_on_xeon.sh @@ -261,7 +261,7 @@ function validate_megaservice_long_text() { echo ">>> Checking long text data in form format, set summary_type=auto" validate_service \ "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \ - "[DONE]" \ + "Intel" \ "docsum-xeon-backend-server" \ "docsum-xeon-backend-server" \ "media" "" \ @@ -274,7 +274,7 @@ function validate_megaservice_long_text() { echo ">>> Checking long text data in form format, set summary_type=stuff" validate_service \ "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \ - "[DONE]" \ + "Intel" \ "docsum-xeon-backend-server" \ "docsum-xeon-backend-server" \ "media" "" \ @@ -287,7 +287,7 @@ function validate_megaservice_long_text() { echo ">>> Checking long text data in form format, set summary_type=truncate" validate_service \ "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \ - "[DONE]" \ + "Intel" \ "docsum-xeon-backend-server" \ "docsum-xeon-backend-server" \ "media" "" \ @@ -300,7 +300,7 @@ function validate_megaservice_long_text() { echo ">>> Checking long text data in form format, set summary_type=map_reduce" validate_service \ "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \ - "[DONE]" \ + "Intel" \ "docsum-xeon-backend-server" \ "docsum-xeon-backend-server" \ "media" "" \ @@ -313,7 +313,7 @@ function validate_megaservice_long_text() { echo ">>> Checking long text data in form format, set summary_type=refine" validate_service \ "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \ - "[DONE]" \ + "Intel" \ "docsum-xeon-backend-server" \ "docsum-xeon-backend-server" \ "media" "" \ From f4febe746d12b0e1b4e77c1c9fbe9fddc8857666 Mon Sep 17 00:00:00 2001 From: letonghan Date: Tue, 25 Mar 2025 22:49:49 +0800 Subject: [PATCH 06/12] refine test case Signed-off-by: letonghan --- DocSum/tests/test_compose_on_gaudi.sh | 43 ++++++++++++++--------- DocSum/tests/test_compose_on_xeon.sh | 43 ++++++++++++++--------- DocSum/tests/test_compose_tgi_on_gaudi.sh | 43 ++++++++++++++--------- DocSum/tests/test_compose_tgi_on_xeon.sh | 43 ++++++++++++++--------- 4 files changed, 108 insertions(+), 64 deletions(-) diff --git a/DocSum/tests/test_compose_on_gaudi.sh b/DocSum/tests/test_compose_on_gaudi.sh index 8c4458b67a..c1b6a52a5c 100644 --- a/DocSum/tests/test_compose_on_gaudi.sh +++ b/DocSum/tests/test_compose_on_gaudi.sh @@ -116,11 +116,16 @@ function validate_service() { local FORM_DATA3="$9" local FORM_DATA4="${10}" local FORM_DATA5="${11}" + local FORM_DATA6="${12}" 
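+    # FORM_DATA6 (e.g. "stream=False") is optional; it is appended to the curl invocation only when non-empty, so the existing five-field call sites keep working unchanged.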
if [[ $VALIDATE_TYPE == *"json"* ]]; then HTTP_RESPONSE=$(curl --silent --write-out "HTTPSTATUS:%{http_code}" -X POST -d "$INPUT_DATA" -H 'Content-Type: application/json' "$URL") else - HTTP_RESPONSE=$(curl --silent --write-out "HTTPSTATUS:%{http_code}" -X POST -F "$FORM_DATA1" -F "$FORM_DATA2" -F "$FORM_DATA3" -F "$FORM_DATA4" -F "$FORM_DATA5" -H 'Content-Type: multipart/form-data' "$URL") + CURL_CMD=(curl --silent --write-out "HTTPSTATUS:%{http_code}" -X POST -F "$FORM_DATA1" -F "$FORM_DATA2" -F "$FORM_DATA3" -F "$FORM_DATA4" -F "$FORM_DATA5" -H 'Content-Type: multipart/form-data' "$URL") + if [[ -n "$FORM_DATA6" ]]; then + CURL_CMD+=(-F "$FORM_DATA6") + fi + HTTP_RESPONSE=$("${CURL_CMD[@]}") fi HTTP_STATUS=$(echo $HTTP_RESPONSE | tr -d '\n' | sed -e 's/.*HTTPSTATUS://') RESPONSE_BODY=$(echo $HTTP_RESPONSE | sed -e 's/HTTPSTATUS\:.*//g') @@ -210,7 +215,7 @@ function validate_megaservice_text() { echo ">>> Checking text data in form format, upload file" validate_service \ "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \ - "[DONE]" \ + "TEI" \ "docsum-gaudi-backend-server" \ "docsum-gaudi-backend-server" \ "media" "" \ @@ -218,23 +223,24 @@ function validate_megaservice_text() { "messages=" \ "files=@$ROOT_FOLDER/data/short.txt" \ "max_tokens=32" \ - "language=en" + "language=en" \ + "stream=False" } function validate_megaservice_multimedia() { echo ">>> Checking audio data in json format" validate_service \ "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \ - "[DONE]" \ + "well" \ "docsum-gaudi-backend-server" \ "docsum-gaudi-backend-server" \ "json" \ - "{\"type\": \"audio\", \"messages\": \"$(input_data_for_test "audio")\"}" + "{\"type\": \"audio\", \"messages\": \"$(input_data_for_test "audio")\", \"stream\": \"False\"}" echo ">>> Checking audio data in form format" validate_service \ "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \ - "[DONE]" \ + "you" \ "docsum-gaudi-backend-server" \ "docsum-gaudi-backend-server" \ "media" "" \ @@ -242,21 +248,21 @@ function validate_megaservice_multimedia() { "messages=UklGRigAAABXQVZFZm10IBIAAAABAAEARKwAAIhYAQACABAAAABkYXRhAgAAAAEA" \ "max_tokens=32" \ "language=en" \ - "stream=True" + "stream=False" echo ">>> Checking video data in json format" validate_service \ "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \ - "[DONE]" \ + "bye" \ "docsum-gaudi-backend-server" \ "docsum-gaudi-backend-server" \ "json" \ - "{\"type\": \"video\", \"messages\": \"$(input_data_for_test "video")\"}" + "{\"type\": \"video\", \"messages\": \"$(input_data_for_test "video")\", \"stream\": \"False\"}" echo ">>> Checking video data in form format" validate_service \ "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \ - "[DONE]" \ + "bye" \ "docsum-gaudi-backend-server" \ "docsum-gaudi-backend-server" \ "media" "" \ @@ -264,7 +270,7 @@ function validate_megaservice_multimedia() { "messages=\"$(input_data_for_test "video")\"" \ "max_tokens=32" \ "language=en" \ - "stream=True" + "stream=False" } function validate_megaservice_long_text() { @@ -279,7 +285,8 @@ function validate_megaservice_long_text() { "messages=" \ "files=@$ROOT_FOLDER/data/long.txt" \ "max_tokens=128" \ - "summary_type=auto" + "summary_type=auto" \ + "stream=False" echo ">>> Checking long text data in form format, set summary_type=stuff" validate_service \ @@ -292,7 +299,8 @@ function validate_megaservice_long_text() { "messages=" \ "files=@$ROOT_FOLDER/data/long.txt" \ "max_tokens=128" \ - "summary_type=stuff" + "summary_type=stuff" \ + "stream=False" echo ">>> Checking long text data in form format, set 
summary_type=truncate" validate_service \ @@ -305,7 +313,8 @@ function validate_megaservice_long_text() { "messages=" \ "files=@$ROOT_FOLDER/data/long.txt" \ "max_tokens=128" \ - "summary_type=truncate" + "summary_type=truncate" \ + "stream=False" echo ">>> Checking long text data in form format, set summary_type=map_reduce" validate_service \ @@ -318,7 +327,8 @@ function validate_megaservice_long_text() { "messages=" \ "files=@$ROOT_FOLDER/data/long.txt" \ "max_tokens=128" \ - "summary_type=map_reduce" + "summary_type=map_reduce" \ + "stream=False" echo ">>> Checking long text data in form format, set summary_type=refine" validate_service \ @@ -331,7 +341,8 @@ function validate_megaservice_long_text() { "messages=" \ "files=@$ROOT_FOLDER/data/long.txt" \ "max_tokens=128" \ - "summary_type=refine" + "summary_type=refine" \ + "stream=False" } function stop_docker() { diff --git a/DocSum/tests/test_compose_on_xeon.sh b/DocSum/tests/test_compose_on_xeon.sh index ed9cb52ca7..4402475542 100644 --- a/DocSum/tests/test_compose_on_xeon.sh +++ b/DocSum/tests/test_compose_on_xeon.sh @@ -113,11 +113,16 @@ function validate_service() { local FORM_DATA3="$9" local FORM_DATA4="${10}" local FORM_DATA5="${11}" + local FORM_DATA6="${12}" if [[ $VALIDATE_TYPE == *"json"* ]]; then HTTP_RESPONSE=$(curl --silent --write-out "HTTPSTATUS:%{http_code}" -X POST -d "$INPUT_DATA" -H 'Content-Type: application/json' "$URL") else - HTTP_RESPONSE=$(curl --silent --write-out "HTTPSTATUS:%{http_code}" -X POST -F "$FORM_DATA1" -F "$FORM_DATA2" -F "$FORM_DATA3" -F "$FORM_DATA4" -F "$FORM_DATA5" -H 'Content-Type: multipart/form-data' "$URL") + CURL_CMD=(curl --silent --write-out "HTTPSTATUS:%{http_code}" -X POST -F "$FORM_DATA1" -F "$FORM_DATA2" -F "$FORM_DATA3" -F "$FORM_DATA4" -F "$FORM_DATA5" -H 'Content-Type: multipart/form-data' "$URL") + if [[ -n "$FORM_DATA6" ]]; then + CURL_CMD+=(-F "$FORM_DATA6") + fi + HTTP_RESPONSE=$("${CURL_CMD[@]}") fi HTTP_STATUS=$(echo $HTTP_RESPONSE | tr -d '\n' | sed -e 's/.*HTTPSTATUS://') RESPONSE_BODY=$(echo $HTTP_RESPONSE | sed -e 's/HTTPSTATUS\:.*//g') @@ -207,7 +212,7 @@ function validate_megaservice_text() { echo ">>> Checking text data in form format, upload file" validate_service \ "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \ - "[DONE]" \ + "TEI" \ "docsum-xeon-backend-server" \ "docsum-xeon-backend-server" \ "media" "" \ @@ -215,23 +220,24 @@ function validate_megaservice_text() { "messages=" \ "files=@$ROOT_FOLDER/data/short.txt" \ "max_tokens=32" \ - "language=en" + "language=en" \ + "stream=False" } function validate_megaservice_multimedia() { echo ">>> Checking audio data in json format" validate_service \ "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \ - "[DONE]" \ + "well" \ "docsum-xeon-backend-server" \ "docsum-xeon-backend-server" \ "json" \ - "{\"type\": \"audio\", \"messages\": \"$(input_data_for_test "audio")\"}" + "{\"type\": \"audio\", \"messages\": \"$(input_data_for_test "audio")\", \"stream\": \"False\"}" echo ">>> Checking audio data in form format" validate_service \ "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \ - "[DONE]" \ + "you" \ "docsum-xeon-backend-server" \ "docsum-xeon-backend-server" \ "media" "" \ @@ -239,21 +245,21 @@ function validate_megaservice_multimedia() { "messages=UklGRigAAABXQVZFZm10IBIAAAABAAEARKwAAIhYAQACABAAAABkYXRhAgAAAAEA" \ "max_tokens=32" \ "language=en" \ - "stream=True" + "stream=False" echo ">>> Checking video data in json format" validate_service \ "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \ - "[DONE]" \ + "bye" \ 
"docsum-xeon-backend-server" \ "docsum-xeon-backend-server" \ "json" \ - "{\"type\": \"video\", \"messages\": \"$(input_data_for_test "video")\"}" + "{\"type\": \"video\", \"messages\": \"$(input_data_for_test "video")\", \"stream\": \"False\"}" echo ">>> Checking video data in form format" validate_service \ "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \ - "[DONE]" \ + "bye" \ "docsum-xeon-backend-server" \ "docsum-xeon-backend-server" \ "media" "" \ @@ -261,7 +267,7 @@ function validate_megaservice_multimedia() { "messages=\"$(input_data_for_test "video")\"" \ "max_tokens=32" \ "language=en" \ - "stream=True" + "stream=False" } function validate_megaservice_long_text() { @@ -276,7 +282,8 @@ function validate_megaservice_long_text() { "messages=" \ "files=@$ROOT_FOLDER/data/long.txt" \ "max_tokens=128" \ - "summary_type=auto" + "summary_type=auto" \ + "stream=False" echo ">>> Checking long text data in form format, set summary_type=stuff" validate_service \ @@ -289,7 +296,8 @@ function validate_megaservice_long_text() { "messages=" \ "files=@$ROOT_FOLDER/data/long.txt" \ "max_tokens=128" \ - "summary_type=stuff" + "summary_type=stuff" \ + "stream=False" echo ">>> Checking long text data in form format, set summary_type=truncate" validate_service \ @@ -302,7 +310,8 @@ function validate_megaservice_long_text() { "messages=" \ "files=@$ROOT_FOLDER/data/long.txt" \ "max_tokens=128" \ - "summary_type=truncate" + "summary_type=truncate" \ + "stream=False" echo ">>> Checking long text data in form format, set summary_type=map_reduce" validate_service \ @@ -315,7 +324,8 @@ function validate_megaservice_long_text() { "messages=" \ "files=@$ROOT_FOLDER/data/long.txt" \ "max_tokens=128" \ - "summary_type=map_reduce" + "summary_type=map_reduce" \ + "stream=False" echo ">>> Checking long text data in form format, set summary_type=refine" validate_service \ @@ -328,7 +338,8 @@ function validate_megaservice_long_text() { "messages=" \ "files=@$ROOT_FOLDER/data/long.txt" \ "max_tokens=128" \ - "summary_type=refine" + "summary_type=refine" \ + "stream=False" } function stop_docker() { diff --git a/DocSum/tests/test_compose_tgi_on_gaudi.sh b/DocSum/tests/test_compose_tgi_on_gaudi.sh index e62fa483c2..b297108db8 100644 --- a/DocSum/tests/test_compose_tgi_on_gaudi.sh +++ b/DocSum/tests/test_compose_tgi_on_gaudi.sh @@ -98,11 +98,16 @@ function validate_service() { local FORM_DATA3="$9" local FORM_DATA4="${10}" local FORM_DATA5="${11}" + local FORM_DATA6="${12}" if [[ $VALIDATE_TYPE == *"json"* ]]; then HTTP_RESPONSE=$(curl --silent --write-out "HTTPSTATUS:%{http_code}" -X POST -d "$INPUT_DATA" -H 'Content-Type: application/json' "$URL") else - HTTP_RESPONSE=$(curl --silent --write-out "HTTPSTATUS:%{http_code}" -X POST -F "$FORM_DATA1" -F "$FORM_DATA2" -F "$FORM_DATA3" -F "$FORM_DATA4" -F "$FORM_DATA5" -H 'Content-Type: multipart/form-data' "$URL") + CURL_CMD=(curl --silent --write-out "HTTPSTATUS:%{http_code}" -X POST -F "$FORM_DATA1" -F "$FORM_DATA2" -F "$FORM_DATA3" -F "$FORM_DATA4" -F "$FORM_DATA5" -H 'Content-Type: multipart/form-data' "$URL") + if [[ -n "$FORM_DATA6" ]]; then + CURL_CMD+=(-F "$FORM_DATA6") + fi + HTTP_RESPONSE=$("${CURL_CMD[@]}") fi HTTP_STATUS=$(echo $HTTP_RESPONSE | tr -d '\n' | sed -e 's/.*HTTPSTATUS://') RESPONSE_BODY=$(echo $HTTP_RESPONSE | sed -e 's/HTTPSTATUS\:.*//g') @@ -201,7 +206,7 @@ function validate_megaservice_text() { echo ">>> Checking text data in form format, upload file" validate_service \ "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \ - "[DONE]" \ + "TEI" \ 
"docsum-gaudi-backend-server" \ "docsum-gaudi-backend-server" \ "media" "" \ @@ -209,23 +214,24 @@ function validate_megaservice_text() { "messages=" \ "files=@$ROOT_FOLDER/data/short.txt" \ "max_tokens=32" \ - "language=en" + "language=en" \ + "stream=False" } function validate_megaservice_multimedia() { echo ">>> Checking audio data in json format" validate_service \ "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \ - "[DONE]" \ + "well" \ "docsum-gaudi-backend-server" \ "docsum-gaudi-backend-server" \ "json" \ - "{\"type\": \"audio\", \"messages\": \"$(input_data_for_test "audio")\"}" + "{\"type\": \"audio\", \"messages\": \"$(input_data_for_test "audio")\", \"stream\": \"False\"}" echo ">>> Checking audio data in form format" validate_service \ "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \ - "[DONE]" \ + "you" \ "docsum-gaudi-backend-server" \ "docsum-gaudi-backend-server" \ "media" "" \ @@ -233,21 +239,21 @@ function validate_megaservice_multimedia() { "messages=UklGRigAAABXQVZFZm10IBIAAAABAAEARKwAAIhYAQACABAAAABkYXRhAgAAAAEA" \ "max_tokens=32" \ "language=en" \ - "stream=True" + "stream=False" echo ">>> Checking video data in json format" validate_service \ "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \ - "[DONE]" \ + "bye" \ "docsum-gaudi-backend-server" \ "docsum-gaudi-backend-server" \ "json" \ - "{\"type\": \"video\", \"messages\": \"$(input_data_for_test "video")\"}" + "{\"type\": \"video\", \"messages\": \"$(input_data_for_test "video")\", \"stream\": \"False\"}" echo ">>> Checking video data in form format" validate_service \ "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \ - "[DONE]" \ + "bye" \ "docsum-gaudi-backend-server" \ "docsum-gaudi-backend-server" \ "media" "" \ @@ -255,7 +261,7 @@ function validate_megaservice_multimedia() { "messages=\"$(input_data_for_test "video")\"" \ "max_tokens=32" \ "language=en" \ - "stream=True" + "stream=False" } function validate_megaservice_long_text() { @@ -270,7 +276,8 @@ function validate_megaservice_long_text() { "messages=" \ "files=@$ROOT_FOLDER/data/long.txt" \ "max_tokens=128" \ - "summary_type=auto" + "summary_type=auto" \ + "stream=False" echo ">>> Checking long text data in form format, set summary_type=stuff" validate_service \ @@ -283,7 +290,8 @@ function validate_megaservice_long_text() { "messages=" \ "files=@$ROOT_FOLDER/data/long.txt" \ "max_tokens=128" \ - "summary_type=stuff" + "summary_type=stuff" \ + "stream=False" echo ">>> Checking long text data in form format, set summary_type=truncate" validate_service \ @@ -296,7 +304,8 @@ function validate_megaservice_long_text() { "messages=" \ "files=@$ROOT_FOLDER/data/long.txt" \ "max_tokens=128" \ - "summary_type=truncate" + "summary_type=truncate" \ + "stream=False" echo ">>> Checking long text data in form format, set summary_type=map_reduce" validate_service \ @@ -309,7 +318,8 @@ function validate_megaservice_long_text() { "messages=" \ "files=@$ROOT_FOLDER/data/long.txt" \ "max_tokens=128" \ - "summary_type=map_reduce" + "summary_type=map_reduce" \ + "stream=False" echo ">>> Checking long text data in form format, set summary_type=refine" validate_service \ @@ -322,7 +332,8 @@ function validate_megaservice_long_text() { "messages=" \ "files=@$ROOT_FOLDER/data/long.txt" \ "max_tokens=128" \ - "summary_type=refine" + "summary_type=refine" \ + "stream=False" } function stop_docker() { diff --git a/DocSum/tests/test_compose_tgi_on_xeon.sh b/DocSum/tests/test_compose_tgi_on_xeon.sh index 86b4809da3..16986b18ea 100644 --- a/DocSum/tests/test_compose_tgi_on_xeon.sh +++ 
b/DocSum/tests/test_compose_tgi_on_xeon.sh @@ -97,11 +97,16 @@ function validate_service() { local FORM_DATA3="$9" local FORM_DATA4="${10}" local FORM_DATA5="${11}" + local FORM_DATA6="${12}" if [[ $VALIDATE_TYPE == *"json"* ]]; then HTTP_RESPONSE=$(curl --silent --write-out "HTTPSTATUS:%{http_code}" -X POST -d "$INPUT_DATA" -H 'Content-Type: application/json' "$URL") else - HTTP_RESPONSE=$(curl --silent --write-out "HTTPSTATUS:%{http_code}" -X POST -F "$FORM_DATA1" -F "$FORM_DATA2" -F "$FORM_DATA3" -F "$FORM_DATA4" -F "$FORM_DATA5" -H 'Content-Type: multipart/form-data' "$URL") + CURL_CMD=(curl --silent --write-out "HTTPSTATUS:%{http_code}" -X POST -F "$FORM_DATA1" -F "$FORM_DATA2" -F "$FORM_DATA3" -F "$FORM_DATA4" -F "$FORM_DATA5" -H 'Content-Type: multipart/form-data' "$URL") + if [[ -n "$FORM_DATA6" ]]; then + CURL_CMD+=(-F "$FORM_DATA6") + fi + HTTP_RESPONSE=$("${CURL_CMD[@]}") fi HTTP_STATUS=$(echo $HTTP_RESPONSE | tr -d '\n' | sed -e 's/.*HTTPSTATUS://') RESPONSE_BODY=$(echo $HTTP_RESPONSE | sed -e 's/HTTPSTATUS\:.*//g') @@ -200,7 +205,7 @@ function validate_megaservice_text() { echo ">>> Checking text data in form format, upload file" validate_service \ "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \ - "[DONE]" \ + "TEI" \ "docsum-xeon-backend-server" \ "docsum-xeon-backend-server" \ "media" "" \ @@ -208,23 +213,24 @@ function validate_megaservice_text() { "messages=" \ "files=@$ROOT_FOLDER/data/short.txt" \ "max_tokens=32" \ - "language=en" + "language=en" \ + "stream=False" } function validate_megaservice_multimedia() { echo ">>> Checking audio data in json format" validate_service \ "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \ - "[DONE]" \ + "well" \ "docsum-xeon-backend-server" \ "docsum-xeon-backend-server" \ "json" \ - "{\"type\": \"audio\", \"messages\": \"$(input_data_for_test "audio")\"}" + "{\"type\": \"audio\", \"messages\": \"$(input_data_for_test "audio")\", \"stream\": \"False\"}" echo ">>> Checking audio data in form format" validate_service \ "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \ - "[DONE]" \ + "you" \ "docsum-xeon-backend-server" \ "docsum-xeon-backend-server" \ "media" "" \ @@ -232,21 +238,21 @@ function validate_megaservice_multimedia() { "messages=UklGRigAAABXQVZFZm10IBIAAAABAAEARKwAAIhYAQACABAAAABkYXRhAgAAAAEA" \ "max_tokens=32" \ "language=en" \ - "stream=True" + "stream=False" echo ">>> Checking video data in json format" validate_service \ "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \ - "[DONE]" \ + "bye" \ "docsum-xeon-backend-server" \ "docsum-xeon-backend-server" \ "json" \ - "{\"type\": \"video\", \"messages\": \"$(input_data_for_test "video")\"}" + "{\"type\": \"video\", \"messages\": \"$(input_data_for_test "video")\", \"stream\": \"False\"s}" echo ">>> Checking video data in form format" validate_service \ "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \ - "[DONE]" \ + "bye" \ "docsum-xeon-backend-server" \ "docsum-xeon-backend-server" \ "media" "" \ @@ -254,7 +260,7 @@ function validate_megaservice_multimedia() { "messages=\"$(input_data_for_test "video")\"" \ "max_tokens=32" \ "language=en" \ - "stream=True" + "stream=False" } function validate_megaservice_long_text() { @@ -269,7 +275,8 @@ function validate_megaservice_long_text() { "messages=" \ "files=@$ROOT_FOLDER/data/long.txt" \ "max_tokens=128" \ - "summary_type=auto" + "summary_type=auto" \ + "stream=False" echo ">>> Checking long text data in form format, set summary_type=stuff" validate_service \ @@ -282,7 +289,8 @@ function validate_megaservice_long_text() { "messages=" 
\ "files=@$ROOT_FOLDER/data/long.txt" \ "max_tokens=128" \ - "summary_type=stuff" + "summary_type=stuff" \ + "stream=False" echo ">>> Checking long text data in form format, set summary_type=truncate" validate_service \ @@ -295,7 +303,8 @@ function validate_megaservice_long_text() { "messages=" \ "files=@$ROOT_FOLDER/data/long.txt" \ "max_tokens=128" \ - "summary_type=truncate" + "summary_type=truncate" \ + "stream=False" echo ">>> Checking long text data in form format, set summary_type=map_reduce" validate_service \ @@ -308,7 +317,8 @@ function validate_megaservice_long_text() { "messages=" \ "files=@$ROOT_FOLDER/data/long.txt" \ "max_tokens=128" \ - "summary_type=map_reduce" + "summary_type=map_reduce" \ + "stream=False" echo ">>> Checking long text data in form format, set summary_type=refine" validate_service \ @@ -321,7 +331,8 @@ function validate_megaservice_long_text() { "messages=" \ "files=@$ROOT_FOLDER/data/long.txt" \ "max_tokens=128" \ - "summary_type=refine" + "summary_type=refine" \ + "stream=False" } function stop_docker() { From 180d55ec3048a1d7b05207b7e94ad4b288442f84 Mon Sep 17 00:00:00 2001 From: letonghan Date: Wed, 26 Mar 2025 09:32:03 +0800 Subject: [PATCH 07/12] fix typo Signed-off-by: letonghan --- DocSum/tests/test_compose_tgi_on_xeon.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/DocSum/tests/test_compose_tgi_on_xeon.sh b/DocSum/tests/test_compose_tgi_on_xeon.sh index 16986b18ea..e269e9c568 100644 --- a/DocSum/tests/test_compose_tgi_on_xeon.sh +++ b/DocSum/tests/test_compose_tgi_on_xeon.sh @@ -247,7 +247,7 @@ function validate_megaservice_multimedia() { "docsum-xeon-backend-server" \ "docsum-xeon-backend-server" \ "json" \ - "{\"type\": \"video\", \"messages\": \"$(input_data_for_test "video")\", \"stream\": \"False\"s}" + "{\"type\": \"video\", \"messages\": \"$(input_data_for_test "video")\", \"stream\": \"False\"}" echo ">>> Checking video data in form format" validate_service \ From a12ccd5ce6bad8e8c7e64b0bc67617d48746afdc Mon Sep 17 00:00:00 2001 From: letonghan Date: Thu, 27 Mar 2025 21:51:25 +0800 Subject: [PATCH 08/12] modify to short file for type=stuff in test scripts Signed-off-by: letonghan --- DocSum/tests/test_compose_on_gaudi.sh | 2 +- DocSum/tests/test_compose_on_xeon.sh | 2 +- DocSum/tests/test_compose_tgi_on_gaudi.sh | 2 +- DocSum/tests/test_compose_tgi_on_xeon.sh | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/DocSum/tests/test_compose_on_gaudi.sh b/DocSum/tests/test_compose_on_gaudi.sh index c1b6a52a5c..d9a608a83a 100644 --- a/DocSum/tests/test_compose_on_gaudi.sh +++ b/DocSum/tests/test_compose_on_gaudi.sh @@ -297,7 +297,7 @@ function validate_megaservice_long_text() { "media" "" \ "type=text" \ "messages=" \ - "files=@$ROOT_FOLDER/data/long.txt" \ + "files=@$ROOT_FOLDER/data/short.txt" \ "max_tokens=128" \ "summary_type=stuff" \ "stream=False" diff --git a/DocSum/tests/test_compose_on_xeon.sh b/DocSum/tests/test_compose_on_xeon.sh index 4402475542..d8447ee550 100644 --- a/DocSum/tests/test_compose_on_xeon.sh +++ b/DocSum/tests/test_compose_on_xeon.sh @@ -294,7 +294,7 @@ function validate_megaservice_long_text() { "media" "" \ "type=text" \ "messages=" \ - "files=@$ROOT_FOLDER/data/long.txt" \ + "files=@$ROOT_FOLDER/data/short.txt" \ "max_tokens=128" \ "summary_type=stuff" \ "stream=False" diff --git a/DocSum/tests/test_compose_tgi_on_gaudi.sh b/DocSum/tests/test_compose_tgi_on_gaudi.sh index b297108db8..ebe1177709 100644 --- a/DocSum/tests/test_compose_tgi_on_gaudi.sh +++ 
From a12ccd5ce6bad8e8c7e64b0bc67617d48746afdc Mon Sep 17 00:00:00 2001
From: letonghan
Date: Thu, 27 Mar 2025 21:51:25 +0800
Subject: [PATCH 08/12] modify to short file for type=stuff in test scripts

Signed-off-by: letonghan
---
 DocSum/tests/test_compose_on_gaudi.sh     | 2 +-
 DocSum/tests/test_compose_on_xeon.sh      | 2 +-
 DocSum/tests/test_compose_tgi_on_gaudi.sh | 2 +-
 DocSum/tests/test_compose_tgi_on_xeon.sh  | 2 +-
 4 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/DocSum/tests/test_compose_on_gaudi.sh b/DocSum/tests/test_compose_on_gaudi.sh
index c1b6a52a5c..d9a608a83a 100644
--- a/DocSum/tests/test_compose_on_gaudi.sh
+++ b/DocSum/tests/test_compose_on_gaudi.sh
@@ -297,7 +297,7 @@ function validate_megaservice_long_text() {
         "media" "" \
         "type=text" \
         "messages=" \
-        "files=@$ROOT_FOLDER/data/long.txt" \
+        "files=@$ROOT_FOLDER/data/short.txt" \
         "max_tokens=128" \
         "summary_type=stuff" \
         "stream=False"
diff --git a/DocSum/tests/test_compose_on_xeon.sh b/DocSum/tests/test_compose_on_xeon.sh
index 4402475542..d8447ee550 100644
--- a/DocSum/tests/test_compose_on_xeon.sh
+++ b/DocSum/tests/test_compose_on_xeon.sh
@@ -294,7 +294,7 @@ function validate_megaservice_long_text() {
         "media" "" \
         "type=text" \
         "messages=" \
-        "files=@$ROOT_FOLDER/data/long.txt" \
+        "files=@$ROOT_FOLDER/data/short.txt" \
         "max_tokens=128" \
         "summary_type=stuff" \
         "stream=False"
diff --git a/DocSum/tests/test_compose_tgi_on_gaudi.sh b/DocSum/tests/test_compose_tgi_on_gaudi.sh
index b297108db8..ebe1177709 100644
--- a/DocSum/tests/test_compose_tgi_on_gaudi.sh
+++ b/DocSum/tests/test_compose_tgi_on_gaudi.sh
@@ -288,7 +288,7 @@ function validate_megaservice_long_text() {
         "media" "" \
         "type=text" \
         "messages=" \
-        "files=@$ROOT_FOLDER/data/long.txt" \
+        "files=@$ROOT_FOLDER/data/short.txt" \
         "max_tokens=128" \
         "summary_type=stuff" \
         "stream=False"
diff --git a/DocSum/tests/test_compose_tgi_on_xeon.sh b/DocSum/tests/test_compose_tgi_on_xeon.sh
index e269e9c568..967d60b5eb 100644
--- a/DocSum/tests/test_compose_tgi_on_xeon.sh
+++ b/DocSum/tests/test_compose_tgi_on_xeon.sh
@@ -287,7 +287,7 @@ function validate_megaservice_long_text() {
         "media" "" \
         "type=text" \
         "messages=" \
-        "files=@$ROOT_FOLDER/data/long.txt" \
+        "files=@$ROOT_FOLDER/data/short.txt" \
         "max_tokens=128" \
         "summary_type=stuff" \
         "stream=False"

From 0471f96ddf4d8a2da45beb6c785706004ed1739a Mon Sep 17 00:00:00 2001
From: letonghan
Date: Thu, 27 Mar 2025 22:43:04 +0800
Subject: [PATCH 09/12] update expected results

Signed-off-by: letonghan
---
 DocSum/tests/test_compose_on_gaudi.sh     | 2 +-
 DocSum/tests/test_compose_on_xeon.sh      | 2 +-
 DocSum/tests/test_compose_tgi_on_gaudi.sh | 2 +-
 DocSum/tests/test_compose_tgi_on_xeon.sh  | 2 +-
 4 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/DocSum/tests/test_compose_on_gaudi.sh b/DocSum/tests/test_compose_on_gaudi.sh
index d9a608a83a..d3653d0e93 100644
--- a/DocSum/tests/test_compose_on_gaudi.sh
+++ b/DocSum/tests/test_compose_on_gaudi.sh
@@ -291,7 +291,7 @@ function validate_megaservice_long_text() {
     echo ">>> Checking long text data in form format, set summary_type=stuff"
     validate_service \
         "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \
-        "Intel" \
+        "TEI" \
         "docsum-gaudi-backend-server" \
         "docsum-gaudi-backend-server" \
         "media" "" \
diff --git a/DocSum/tests/test_compose_on_xeon.sh b/DocSum/tests/test_compose_on_xeon.sh
index d8447ee550..090084047e 100644
--- a/DocSum/tests/test_compose_on_xeon.sh
+++ b/DocSum/tests/test_compose_on_xeon.sh
@@ -288,7 +288,7 @@ function validate_megaservice_long_text() {
     echo ">>> Checking long text data in form format, set summary_type=stuff"
     validate_service \
         "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \
-        "Intel" \
+        "TEI" \
         "docsum-xeon-backend-server" \
         "docsum-xeon-backend-server" \
         "media" "" \
diff --git a/DocSum/tests/test_compose_tgi_on_gaudi.sh b/DocSum/tests/test_compose_tgi_on_gaudi.sh
index ebe1177709..3f21cdee82 100644
--- a/DocSum/tests/test_compose_tgi_on_gaudi.sh
+++ b/DocSum/tests/test_compose_tgi_on_gaudi.sh
@@ -282,7 +282,7 @@ function validate_megaservice_long_text() {
     echo ">>> Checking long text data in form format, set summary_type=stuff"
     validate_service \
         "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \
-        "Intel" \
+        "TEI" \
         "docsum-gaudi-backend-server" \
         "docsum-gaudi-backend-server" \
         "media" "" \
diff --git a/DocSum/tests/test_compose_tgi_on_xeon.sh b/DocSum/tests/test_compose_tgi_on_xeon.sh
index 967d60b5eb..17f7b04cb0 100644
--- a/DocSum/tests/test_compose_tgi_on_xeon.sh
+++ b/DocSum/tests/test_compose_tgi_on_xeon.sh
@@ -295,7 +295,7 @@ function validate_megaservice_long_text() {
     echo ">>> Checking long text data in form format, set summary_type=truncate"
     validate_service \
         "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \
-        "Intel" \
+        "TEI" \
         "docsum-xeon-backend-server" \
         "docsum-xeon-backend-server" \
         "media" "" \
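Note: the expected-result arguments ("TEI", "Intel") are simply substrings that validate_service looks for in the response body, so each must be a keyword the summary of the chosen input file will actually contain; switching the stuff test to short.txt is presumably what makes "TEI" the right keyword there. A sketch of that style of check, with a canned illustrative response:

    #!/bin/bash
    # Keyword-style content check; RESPONSE_BODY here is a canned example.
    RESPONSE_BODY='{"text": "TEI is a toolkit for serving embedding models."}'
    EXPECTED_RESULT="TEI"
    if echo "$RESPONSE_BODY" | grep -q "$EXPECTED_RESULT"; then
        echo "Content is as expected."
    else
        echo "Expected keyword not found: $EXPECTED_RESULT"
        exit 1
    fi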
From aae336ff7959feecb72a9d291ce8b0f8e8adcec4 Mon Sep 17 00:00:00 2001
From: letonghan
Date: Thu, 27 Mar 2025 23:01:37 +0800
Subject: [PATCH 10/12] fix typo

Signed-off-by: letonghan
---
 DocSum/tests/test_compose_tgi_on_xeon.sh | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/DocSum/tests/test_compose_tgi_on_xeon.sh b/DocSum/tests/test_compose_tgi_on_xeon.sh
index 17f7b04cb0..3d7b3f1b22 100644
--- a/DocSum/tests/test_compose_tgi_on_xeon.sh
+++ b/DocSum/tests/test_compose_tgi_on_xeon.sh
@@ -281,7 +281,7 @@ function validate_megaservice_long_text() {
     echo ">>> Checking long text data in form format, set summary_type=stuff"
     validate_service \
         "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \
-        "Intel" \
+        "TEI" \
         "docsum-xeon-backend-server" \
         "docsum-xeon-backend-server" \
         "media" "" \
@@ -295,7 +295,7 @@ function validate_megaservice_long_text() {
     echo ">>> Checking long text data in form format, set summary_type=truncate"
     validate_service \
         "${host_ip}:${BACKEND_SERVICE_PORT}/v1/docsum" \
-        "TEI" \
+        "Intel" \
         "docsum-xeon-backend-server" \
         "docsum-xeon-backend-server" \
         "media" "" \

From 49c8074652f8bf75271aea627c9acccade4dc922 Mon Sep 17 00:00:00 2001
From: letonghan
Date: Fri, 28 Mar 2025 15:59:51 +0800
Subject: [PATCH 11/12] fix typo in readme

Signed-off-by: letonghan
---
 DocSum/docker_compose/intel/hpu/gaudi/README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/DocSum/docker_compose/intel/hpu/gaudi/README.md b/DocSum/docker_compose/intel/hpu/gaudi/README.md
index e2dc4c3da7..8d947554fd 100644
--- a/DocSum/docker_compose/intel/hpu/gaudi/README.md
+++ b/DocSum/docker_compose/intel/hpu/gaudi/README.md
@@ -109,7 +109,7 @@ To set up environment variables for deploying Document Summarization services, f
 ### Start Microservice Docker Containers
 
 ```bash
-cd GenAIExamples/DocSum/docker_compose/intel/cpu/gaudi
+cd GenAIExamples/DocSum/docker_compose/intel/hpu/gaudi
 ```
 
 If use vLLM as the LLM serving backend.
From 15777e052cd5a6ffdb14a8a055c286c19701dbdb Mon Sep 17 00:00:00 2001
From: letonghan
Date: Fri, 28 Mar 2025 16:23:34 +0800
Subject: [PATCH 12/12] refine healthcheck endpoint to localhost:80

Signed-off-by: letonghan
---
 DocSum/docker_compose/intel/cpu/xeon/compose.yaml      | 2 +-
 DocSum/docker_compose/intel/cpu/xeon/compose_tgi.yaml  | 2 +-
 DocSum/docker_compose/intel/hpu/gaudi/compose.yaml     | 2 +-
 DocSum/docker_compose/intel/hpu/gaudi/compose_tgi.yaml | 2 +-
 4 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/DocSum/docker_compose/intel/cpu/xeon/compose.yaml b/DocSum/docker_compose/intel/cpu/xeon/compose.yaml
index e6425b2b92..8eb3bb28b6 100644
--- a/DocSum/docker_compose/intel/cpu/xeon/compose.yaml
+++ b/DocSum/docker_compose/intel/cpu/xeon/compose.yaml
@@ -18,7 +18,7 @@ services:
       LLM_MODEL_ID: ${LLM_MODEL_ID}
       VLLM_TORCH_PROFILER_DIR: "/mnt"
     healthcheck:
-      test: ["CMD-SHELL", "curl -f http://$host_ip:8008/health || exit 1"]
+      test: ["CMD-SHELL", "curl -f http://localhost:80/health || exit 1"]
       interval: 10s
       timeout: 10s
       retries: 100
diff --git a/DocSum/docker_compose/intel/cpu/xeon/compose_tgi.yaml b/DocSum/docker_compose/intel/cpu/xeon/compose_tgi.yaml
index 7499b97501..4b0362bd09 100644
--- a/DocSum/docker_compose/intel/cpu/xeon/compose_tgi.yaml
+++ b/DocSum/docker_compose/intel/cpu/xeon/compose_tgi.yaml
@@ -17,7 +17,7 @@
       HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
       host_ip: ${host_ip}
     healthcheck:
-      test: ["CMD-SHELL", "curl -f http://${host_ip}:${LLM_ENDPOINT_PORT}/health || exit 1"]
+      test: ["CMD-SHELL", "curl -f http://localhost:80/health || exit 1"]
       interval: 10s
       timeout: 10s
       retries: 100
diff --git a/DocSum/docker_compose/intel/hpu/gaudi/compose.yaml b/DocSum/docker_compose/intel/hpu/gaudi/compose.yaml
index 7a0a104af9..681c3e7dd9 100644
--- a/DocSum/docker_compose/intel/hpu/gaudi/compose.yaml
+++ b/DocSum/docker_compose/intel/hpu/gaudi/compose.yaml
@@ -20,7 +20,7 @@ services:
       NUM_CARDS: ${NUM_CARDS}
       VLLM_TORCH_PROFILER_DIR: "/mnt"
     healthcheck:
-      test: ["CMD-SHELL", "curl -f http://$host_ip:8008/health || exit 1"]
+      test: ["CMD-SHELL", "curl -f http://localhost:80/health || exit 1"]
       interval: 10s
       timeout: 10s
       retries: 100
diff --git a/DocSum/docker_compose/intel/hpu/gaudi/compose_tgi.yaml b/DocSum/docker_compose/intel/hpu/gaudi/compose_tgi.yaml
index 34516be195..01008de27a 100644
--- a/DocSum/docker_compose/intel/hpu/gaudi/compose_tgi.yaml
+++ b/DocSum/docker_compose/intel/hpu/gaudi/compose_tgi.yaml
@@ -29,7 +29,7 @@
       - SYS_NICE
     ipc: host
     healthcheck:
-      test: ["CMD-SHELL", "curl -f http://${host_ip}:${LLM_ENDPOINT_PORT}/health || exit 1"]
+      test: ["CMD-SHELL", "curl -f http://localhost:80/health || exit 1"]
       interval: 10s
       timeout: 10s
       retries: 100
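Note: probing http://localhost:80/health keeps the healthcheck inside the container's own network namespace, so it no longer depends on host_ip and LLM_ENDPOINT_PORT resolving correctly from within the container. One way to verify the probe after bringing a stack up; the container name comes from compose_tgi.yaml, and these commands are a suggested manual check rather than part of the patch:

    # Status Docker records from the healthcheck:
    docker inspect --format '{{.State.Health.Status}}' docsum-xeon-tgi-server

    # Re-run the same probe by hand inside the container:
    docker exec docsum-xeon-tgi-server curl -f http://localhost:80/health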