diff --git a/ChatQnA/docker_compose/amd/gpu/rocm/compose.yaml b/ChatQnA/docker_compose/amd/gpu/rocm/compose.yaml index a71fcc830a..14f2eb3312 100644 --- a/ChatQnA/docker_compose/amd/gpu/rocm/compose.yaml +++ b/ChatQnA/docker_compose/amd/gpu/rocm/compose.yaml @@ -16,7 +16,7 @@ services: - chatqna-redis-vector-db - chatqna-tei-embedding-service ports: - - "${CHATQNA_REDIS_DATAPREP_PORT}:5000" + - "${CHATQNA_REDIS_DATAPREP_PORT:-18103}:5000" environment: no_proxy: ${no_proxy} http_proxy: ${http_proxy} diff --git a/ChatQnA/docker_compose/amd/gpu/rocm/compose_faqgen.yaml b/ChatQnA/docker_compose/amd/gpu/rocm/compose_faqgen.yaml index 161bb4589f..df2a9a42a3 100644 --- a/ChatQnA/docker_compose/amd/gpu/rocm/compose_faqgen.yaml +++ b/ChatQnA/docker_compose/amd/gpu/rocm/compose_faqgen.yaml @@ -16,7 +16,7 @@ services: - chatqna-redis-vector-db - chatqna-tei-embedding-service ports: - - "${CHATQNA_REDIS_DATAPREP_PORT}:5000" + - "${CHATQNA_REDIS_DATAPREP_PORT:-18103}:5000" environment: no_proxy: ${no_proxy} http_proxy: ${http_proxy} diff --git a/ChatQnA/docker_compose/amd/gpu/rocm/compose_faqgen_vllm.yaml b/ChatQnA/docker_compose/amd/gpu/rocm/compose_faqgen_vllm.yaml index b89b367e29..fa1f70d74f 100644 --- a/ChatQnA/docker_compose/amd/gpu/rocm/compose_faqgen_vllm.yaml +++ b/ChatQnA/docker_compose/amd/gpu/rocm/compose_faqgen_vllm.yaml @@ -16,7 +16,7 @@ services: - chatqna-redis-vector-db - chatqna-tei-embedding-service ports: - - "${CHATQNA_REDIS_DATAPREP_PORT}:5000" + - "${CHATQNA_REDIS_DATAPREP_PORT:-18103}:5000" environment: no_proxy: ${no_proxy} http_proxy: ${http_proxy} diff --git a/ChatQnA/docker_compose/amd/gpu/rocm/compose_vllm.yaml b/ChatQnA/docker_compose/amd/gpu/rocm/compose_vllm.yaml index d95ec39e92..0dbbfab0d5 100644 --- a/ChatQnA/docker_compose/amd/gpu/rocm/compose_vllm.yaml +++ b/ChatQnA/docker_compose/amd/gpu/rocm/compose_vllm.yaml @@ -16,7 +16,7 @@ services: - chatqna-redis-vector-db - chatqna-tei-embedding-service ports: - - 
"${CHATQNA_REDIS_DATAPREP_PORT:-5000}:5000" + - "${CHATQNA_REDIS_DATAPREP_PORT:-18103}:5000" environment: no_proxy: ${no_proxy} http_proxy: ${http_proxy} diff --git a/ChatQnA/docker_compose/amd/gpu/rocm/set_env.sh b/ChatQnA/docker_compose/amd/gpu/rocm/set_env.sh index 5691d8fa48..5fcdad0a06 100644 --- a/ChatQnA/docker_compose/amd/gpu/rocm/set_env.sh +++ b/ChatQnA/docker_compose/amd/gpu/rocm/set_env.sh @@ -2,17 +2,17 @@ # Copyright (C) 2025 Advanced Micro Devices, Inc. -export HOST_IP='' -export HOST_IP_EXTERNAL='' +export HOST_IP=${ip_address} +export HOST_IP_EXTERNAL=${ip_address} export CHATQNA_EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5" export CHATQNA_HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN} export CHATQNA_LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct" export CHATQNA_RERANK_MODEL_ID="BAAI/bge-reranker-base" -export CHATQNA_BACKEND_SERVICE_PORT=18102 -export CHATQNA_FRONTEND_SERVICE_PORT=18101 -export CHATQNA_NGINX_PORT=18104 +export CHATQNA_BACKEND_SERVICE_PORT=8888 +export CHATQNA_FRONTEND_SERVICE_PORT=5173 +export CHATQNA_NGINX_PORT=80 export CHATQNA_REDIS_DATAPREP_PORT=18103 export CHATQNA_REDIS_RETRIEVER_PORT=7000 export CHATQNA_REDIS_VECTOR_INSIGHT_PORT=8001 diff --git a/ChatQnA/docker_compose/amd/gpu/rocm/set_env_faqgen.sh b/ChatQnA/docker_compose/amd/gpu/rocm/set_env_faqgen.sh index 6361f5a9fd..543119eadc 100644 --- a/ChatQnA/docker_compose/amd/gpu/rocm/set_env_faqgen.sh +++ b/ChatQnA/docker_compose/amd/gpu/rocm/set_env_faqgen.sh @@ -2,18 +2,18 @@ # Copyright (C) 2025 Advanced Micro Devices, Inc. 
-export HOST_IP='' -export HOST_IP_EXTERNAL='' +export HOST_IP=${ip_address} +export HOST_IP_EXTERNAL=${ip_address} export CHATQNA_EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5" export CHATQNA_HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN} export CHATQNA_LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct" export CHATQNA_RERANK_MODEL_ID="BAAI/bge-reranker-base" -export CHATQNA_BACKEND_SERVICE_PORT=18102 -export CHATQNA_FRONTEND_SERVICE_PORT=18101 +export CHATQNA_BACKEND_SERVICE_PORT=8888 +export CHATQNA_FRONTEND_SERVICE_PORT=5173 export CHATQNA_LLM_FAQGEN_PORT=18011 -export CHATQNA_NGINX_PORT=18104 +export CHATQNA_NGINX_PORT=80 export CHATQNA_REDIS_DATAPREP_PORT=18103 export CHATQNA_REDIS_RETRIEVER_PORT=7000 export CHATQNA_REDIS_VECTOR_INSIGHT_PORT=8001 diff --git a/ChatQnA/docker_compose/amd/gpu/rocm/set_env_faqgen_vllm.sh b/ChatQnA/docker_compose/amd/gpu/rocm/set_env_faqgen_vllm.sh index 20dd880b2d..d2462d2646 100644 --- a/ChatQnA/docker_compose/amd/gpu/rocm/set_env_faqgen_vllm.sh +++ b/ChatQnA/docker_compose/amd/gpu/rocm/set_env_faqgen_vllm.sh @@ -2,18 +2,18 @@ # Copyright (C) 2025 Advanced Micro Devices, Inc. 
-export HOST_IP='' -export HOST_IP_EXTERNAL='' +export HOST_IP=${ip_address} +export HOST_IP_EXTERNAL=${ip_address} export CHATQNA_EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5" export CHATQNA_HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN} export CHATQNA_LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct" export CHATQNA_RERANK_MODEL_ID="BAAI/bge-reranker-base" -export CHATQNA_BACKEND_SERVICE_PORT=18102 -export CHATQNA_FRONTEND_SERVICE_PORT=18101 +export CHATQNA_BACKEND_SERVICE_PORT=8888 +export CHATQNA_FRONTEND_SERVICE_PORT=5173 export CHATQNA_LLM_FAQGEN_PORT=18011 -export CHATQNA_NGINX_PORT=18104 +export CHATQNA_NGINX_PORT=80 export CHATQNA_REDIS_DATAPREP_PORT=18103 export CHATQNA_REDIS_RETRIEVER_PORT=7000 export CHATQNA_REDIS_VECTOR_INSIGHT_PORT=8001 diff --git a/ChatQnA/docker_compose/amd/gpu/rocm/set_env_vllm.sh b/ChatQnA/docker_compose/amd/gpu/rocm/set_env_vllm.sh index 2d1c3920fd..0000b233e1 100644 --- a/ChatQnA/docker_compose/amd/gpu/rocm/set_env_vllm.sh +++ b/ChatQnA/docker_compose/amd/gpu/rocm/set_env_vllm.sh @@ -2,17 +2,17 @@ # Copyright (C) 2025 Advanced Micro Devices, Inc. 
-export HOST_IP='' -export HOST_IP_EXTERNAL='' +export HOST_IP=${ip_address} +export HOST_IP_EXTERNAL=${ip_address} export CHATQNA_EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5" export CHATQNA_HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN} export CHATQNA_LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct" export CHATQNA_RERANK_MODEL_ID="BAAI/bge-reranker-base" -export CHATQNA_BACKEND_SERVICE_PORT=18102 -export CHATQNA_FRONTEND_SERVICE_PORT=18101 -export CHATQNA_NGINX_PORT=18104 +export CHATQNA_BACKEND_SERVICE_PORT=8888 +export CHATQNA_FRONTEND_SERVICE_PORT=5173 +export CHATQNA_NGINX_PORT=80 export CHATQNA_REDIS_DATAPREP_PORT=18103 export CHATQNA_REDIS_RETRIEVER_PORT=7000 export CHATQNA_REDIS_VECTOR_INSIGHT_PORT=8001 diff --git a/ChatQnA/docker_compose/intel/cpu/xeon/grafana/dashboards/download_opea_dashboard.sh b/ChatQnA/docker_compose/intel/cpu/xeon/grafana/dashboards/download_opea_dashboard.sh index 079cabf6a4..c3739ec705 100644 --- a/ChatQnA/docker_compose/intel/cpu/xeon/grafana/dashboards/download_opea_dashboard.sh +++ b/ChatQnA/docker_compose/intel/cpu/xeon/grafana/dashboards/download_opea_dashboard.sh @@ -1,6 +1,8 @@ # Copyright (C) 2025 Intel Corporation # SPDX-License-Identifier: Apache-2.0 -rm *.json +if ls *.json 1> /dev/null 2>&1; then + rm *.json +fi wget https://raw.githubusercontent.com/opea-project/GenAIEval/refs/heads/main/evals/benchmark/grafana/chatqna_megaservice_grafana.json wget https://raw.githubusercontent.com/opea-project/GenAIEval/refs/heads/main/evals/benchmark/grafana/qdrant_grafana.json wget https://raw.githubusercontent.com/opea-project/GenAIEval/refs/heads/main/evals/benchmark/grafana/milvus_grafana.json diff --git a/ChatQnA/docker_compose/intel/cpu/xeon/set_env.sh b/ChatQnA/docker_compose/intel/cpu/xeon/set_env.sh index 711e27e058..2959f94321 100755 --- a/ChatQnA/docker_compose/intel/cpu/xeon/set_env.sh +++ b/ChatQnA/docker_compose/intel/cpu/xeon/set_env.sh @@ -7,6 +7,9 @@ pushd "../../../../../" > /dev/null source .set_env.sh popd > 
/dev/null +export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN} +export HF_TOKEN=${HF_TOKEN} +export host_ip=${ip_address} export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5" export RERANK_MODEL_ID="BAAI/bge-reranker-base" export LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct" diff --git a/ChatQnA/docker_compose/intel/hpu/gaudi/README.md b/ChatQnA/docker_compose/intel/hpu/gaudi/README.md index 6dea162563..4de795be43 100644 --- a/ChatQnA/docker_compose/intel/hpu/gaudi/README.md +++ b/ChatQnA/docker_compose/intel/hpu/gaudi/README.md @@ -43,7 +43,7 @@ Some HuggingFace resources, such as some models, are only accessible if you have ### Configure the Deployment Environment -To set up environment variables for deploying ChatQnA services, source the _setup_env.sh_ script in this directory: +To set up environment variables for deploying ChatQnA services, source the _set_env.sh_ script in this directory (if using FAQGen or guardrails, source the _set_env_faqgen.sh_ script instead): ``` source ./set_env.sh diff --git a/ChatQnA/docker_compose/intel/hpu/gaudi/set_env.sh b/ChatQnA/docker_compose/intel/hpu/gaudi/set_env.sh index 0b55559f5e..fe847e6036 100755 --- a/ChatQnA/docker_compose/intel/hpu/gaudi/set_env.sh +++ b/ChatQnA/docker_compose/intel/hpu/gaudi/set_env.sh @@ -4,12 +4,20 @@ # SPDX-License-Identifier: Apache-2.0 # Function to prompt for input and set environment variables +NON_INTERACTIVE=${NON_INTERACTIVE:-false} + prompt_for_env_var() { local var_name="$1" local prompt_message="$2" local default_value="$3" local mandatory="$4" + if [[ "$NON_INTERACTIVE" == "true" ]]; then + echo "Non-interactive environment detected. 
Setting $var_name to default: $default_value" + export "$var_name"="$default_value" + return + fi + if [[ "$mandatory" == "true" ]]; then while [[ -z "$value" ]]; do read -p "$prompt_message [default: \"${default_value}\"]: " value @@ -34,7 +42,7 @@ popd > /dev/null # Prompt the user for each required environment variable prompt_for_env_var "EMBEDDING_MODEL_ID" "Enter the EMBEDDING_MODEL_ID" "BAAI/bge-base-en-v1.5" false -prompt_for_env_var "HUGGINGFACEHUB_API_TOKEN" "Enter the HUGGINGFACEHUB_API_TOKEN" "" true +prompt_for_env_var "HUGGINGFACEHUB_API_TOKEN" "Enter the HUGGINGFACEHUB_API_TOKEN" "${HF_TOKEN}" true prompt_for_env_var "RERANK_MODEL_ID" "Enter the RERANK_MODEL_ID" "BAAI/bge-reranker-base" false prompt_for_env_var "LLM_MODEL_ID" "Enter the LLM_MODEL_ID" "meta-llama/Meta-Llama-3-8B-Instruct" false prompt_for_env_var "INDEX_NAME" "Enter the INDEX_NAME" "rag-redis" false @@ -42,34 +50,40 @@ prompt_for_env_var "NUM_CARDS" "Enter the number of Gaudi devices" "1" false prompt_for_env_var "host_ip" "Enter the host_ip" "$(curl ifconfig.me)" false #Query for enabling http_proxy -prompt_for_env_var "http_proxy" "Enter the http_proxy." "" false +prompt_for_env_var "http_proxy" "Enter the http_proxy." "${http_proxy}" false #Query for enabling https_proxy -prompt_for_env_var "https_proxy" "Enter the https_proxy." "" false +prompt_for_env_var "https_proxy" "Enter the https_proxy." "${https_proxy}" false #Query for enabling no_proxy -prompt_for_env_var "no_proxy" "Enter the no_proxy." "" false +prompt_for_env_var "no_proxy" "Enter the no_proxy." "${no_proxy}" false # Query for enabling logging -read -p "Enable logging? (yes/no): " logging && logging=$(echo "$logging" | tr '[:upper:]' '[:lower:]') -if [[ "$logging" == "yes" || "$logging" == "y" ]]; then - export LOGFLAG=true -else - export LOGFLAG=false -fi - -# Query for enabling OpenTelemetry Tracing Endpoint -read -p "Enable OpenTelemetry Tracing Endpoint? 
(yes/no): " telemetry && telemetry=$(echo "$telemetry" | tr '[:upper:]' '[:lower:]') -if [[ "$telemetry" == "yes" || "$telemetry" == "y" ]]; then - export JAEGER_IP=$(ip route get 8.8.8.8 | grep -oP 'src \K[^ ]+') - export OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=grpc://$JAEGER_IP:4317 - export TELEMETRY_ENDPOINT=http://$JAEGER_IP:4318/v1/traces - telemetry_flag=true - pushd "grafana/dashboards" > /dev/null - source download_opea_dashboard.sh - popd > /dev/null +if [[ "$NON_INTERACTIVE" == "true" ]]; then + # Query for enabling logging + prompt_for_env_var "LOGFLAG" "Enable logging? (yes/no): " "true" false + export JAEGER_IP=$(ip route get 8.8.8.8 | grep -oP 'src \K[^ ]+') + export OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=grpc://$JAEGER_IP:4317 + export TELEMETRY_ENDPOINT=http://$JAEGER_IP:4318/v1/traces + telemetry_flag=true else - telemetry_flag=false + # Query for enabling logging + read -p "Enable logging? (yes/no): " logging && logging=$(echo "$logging" | tr '[:upper:]' '[:lower:]') + if [[ "$logging" == "yes" || "$logging" == "y" ]]; then + export LOGFLAG=true + else + export LOGFLAG=false + fi + # Query for enabling OpenTelemetry Tracing Endpoint + read -p "Enable OpenTelemetry Tracing Endpoint? 
(yes/no): " telemetry && telemetry=$(echo "$telemetry" | tr '[:upper:]' '[:lower:]') + if [[ "$telemetry" == "yes" || "$telemetry" == "y" ]]; then + export JAEGER_IP=$(ip route get 8.8.8.8 | grep -oP 'src \K[^ ]+') + export OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=grpc://$JAEGER_IP:4317 + export TELEMETRY_ENDPOINT=http://$JAEGER_IP:4318/v1/traces + telemetry_flag=true + else + telemetry_flag=false + fi fi # Generate the .env file diff --git a/ChatQnA/docker_compose/intel/hpu/gaudi/set_env_faqgen.sh b/ChatQnA/docker_compose/intel/hpu/gaudi/set_env_faqgen.sh new file mode 100755 index 0000000000..fde0b35fd0 --- /dev/null +++ b/ChatQnA/docker_compose/intel/hpu/gaudi/set_env_faqgen.sh @@ -0,0 +1,32 @@ +#!/usr/bin/env bash + +# Copyright (C) 2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +pushd "../../../../../" > /dev/null +source .set_env.sh +popd > /dev/null + +export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN} +export HF_TOKEN=${HF_TOKEN} +export host_ip=${ip_address} +export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5" +export RERANK_MODEL_ID="BAAI/bge-reranker-base" +export LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct" +export INDEX_NAME="rag-redis" +export NUM_CARDS=1 +export VLLM_SKIP_WARMUP=true +export LOGFLAG=True +export http_proxy=${http_proxy} +export https_proxy=${https_proxy} +export no_proxy="${ip_address},redis-vector-db,dataprep-redis-service,tei-embedding-service,retriever,tei-reranking-service,tgi-service,vllm-service,guardrails,llm-faqgen,chatqna-gaudi-backend-server,chatqna-gaudi-ui-server,chatqna-gaudi-nginx-server" + +export LLM_ENDPOINT_PORT=8010 +export LLM_SERVER_PORT=9001 +export CHATQNA_BACKEND_PORT=8888 +export CHATQNA_REDIS_VECTOR_PORT=6377 +export CHATQNA_REDIS_VECTOR_INSIGHT_PORT=8006 +export CHATQNA_FRONTEND_SERVICE_PORT=5175 +export NGINX_PORT=80 +export FAQGen_COMPONENT_NAME="OpeaFaqGenvLLM" +export LLM_ENDPOINT="http://${host_ip}:${LLM_ENDPOINT_PORT}" diff --git a/ChatQnA/tests/README.md 
b/ChatQnA/tests/README.md new file mode 100644 index 0000000000..c622008650 --- /dev/null +++ b/ChatQnA/tests/README.md @@ -0,0 +1,123 @@ +# ChatQnA E2E test scripts + +## Set the required environment variable + +```bash +export HUGGINGFACEHUB_API_TOKEN="Your_Huggingface_API_Token" +``` + +## Run test + +On Intel Xeon with TGI: + +```bash +bash test_compose_tgi_on_xeon.sh +``` + +On Intel Xeon with vLLM: + +```bash +bash test_compose_on_xeon.sh +``` + +On Intel Xeon with MariaDB Vector: + +```bash +bash test_compose_mariadb_on_xeon.sh +``` + +On Intel Xeon with Pinecone: + +```bash +bash test_compose_pinecone_on_xeon.sh +``` + +On Intel Xeon with Milvus + +```bash +bash test_compose_milvus_on_xeon.sh +``` + +On Intel Xeon with Qdrant + +```bash +bash test_compose_qdrant_on_xeon.sh +``` + +On Intel Xeon without Rerank: + +```bash +bash test_compose_without_rerank_on_xeon.sh +``` + +On Intel Gaudi with TGI: + +```bash +bash test_compose_tgi_on_gaudi.sh +``` + +On Intel Gaudi with vLLM: + +```bash +bash test_compose_on_gaudi.sh +``` + +On Intel Gaudi with Guardrails: + +```bash +bash test_compose_guardrails_on_gaudi.sh +``` + +On Intel Gaudi without Rerank: + +```bash +bash test_compose_without_rerank_on_gaudi.sh +``` + +On AMD ROCm with TGI: + +```bash +bash test_compose_on_rocm.sh +``` + +On AMD ROCm with vLLM: + +```bash +bash test_compose_vllm_on_rocm.sh +``` + +Test FAQ Generation On Intel Xeon with TGI: + +```bash +bash test_compose_faqgen_tgi_on_xeon.sh +``` + +Test FAQ Generation On Intel Xeon with vLLM: + +```bash +bash test_compose_faqgen_on_xeon.sh +``` + +Test FAQ Generation On Intel Gaudi with TGI: + +```bash +bash test_compose_faqgen_tgi_on_gaudi.sh +``` + +Test FAQ Generation On Intel Gaudi with vLLM: + +```bash +bash test_compose_faqgen_on_gaudi.sh +``` + +Test FAQ Generation On AMD ROCm with TGI: + +```bash +bash test_compose_faqgen_on_rocm.sh +``` + +Test FAQ Generation On AMD ROCm with vLLM: + +```bash +bash test_compose_faqgen_vllm_on_rocm.sh +``` 
diff --git a/ChatQnA/tests/test_compose_faqgen_on_gaudi.sh b/ChatQnA/tests/test_compose_faqgen_on_gaudi.sh index 2a30dbb773..fc95182346 100644 --- a/ChatQnA/tests/test_compose_faqgen_on_gaudi.sh +++ b/ChatQnA/tests/test_compose_faqgen_on_gaudi.sh @@ -36,27 +36,7 @@ function build_docker_images() { function start_services() { cd $WORKPATH/docker_compose/intel/hpu/gaudi - export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5" - export RERANK_MODEL_ID="BAAI/bge-reranker-base" - export LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct" - export NUM_CARDS=1 - export INDEX_NAME="rag-redis" - export host_ip=${ip_address} - export LLM_ENDPOINT_PORT=8010 - export LLM_SERVER_PORT=9001 - export CHATQNA_BACKEND_PORT=8888 - export CHATQNA_REDIS_VECTOR_PORT=6377 - export CHATQNA_REDIS_VECTOR_INSIGHT_PORT=8006 - export CHATQNA_FRONTEND_SERVICE_PORT=5175 - export NGINX_PORT=80 - export FAQGen_COMPONENT_NAME="OpeaFaqGenvLLM" - export LLM_ENDPOINT="http://${host_ip}:${LLM_ENDPOINT_PORT}" - export HF_TOKEN=${HF_TOKEN} - export VLLM_SKIP_WARMUP=true - export LOGFLAG=True - export http_proxy=${http_proxy} - export https_proxy=${https_proxy} - export no_proxy="${ip_address},redis-vector-db,dataprep-redis-service,tei-embedding-service,retriever,tei-reranking-service,tgi-service,vllm-service,guardrails,llm-faqgen,chatqna-gaudi-backend-server,chatqna-gaudi-ui-server,chatqna-gaudi-nginx-server" + source set_env_faqgen.sh # Start Docker Containers docker compose -f compose_faqgen.yaml up -d > ${LOG_PATH}/start_services_with_compose.log diff --git a/ChatQnA/tests/test_compose_faqgen_on_rocm.sh b/ChatQnA/tests/test_compose_faqgen_on_rocm.sh index 7b05bb8c06..a874ca3e90 100644 --- a/ChatQnA/tests/test_compose_faqgen_on_rocm.sh +++ b/ChatQnA/tests/test_compose_faqgen_on_rocm.sh @@ -15,44 +15,7 @@ WORKPATH=$(dirname "$PWD") LOG_PATH="$WORKPATH/tests" ip_address=$(hostname -I | awk '{print $1}') -export HOST_IP=${ip_address} -export HOST_IP_EXTERNAL=${ip_address} - -export 
CHATQNA_EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5" -export CHATQNA_HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN} -export CHATQNA_LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct" -export CHATQNA_RERANK_MODEL_ID="BAAI/bge-reranker-base" - -export CHATQNA_BACKEND_SERVICE_PORT=8888 -export CHATQNA_FRONTEND_SERVICE_PORT=5173 -export CHATQNA_LLM_FAQGEN_PORT=18011 -export CHATQNA_NGINX_PORT=80 -export CHATQNA_REDIS_DATAPREP_PORT=18103 -export CHATQNA_REDIS_RETRIEVER_PORT=7000 -export CHATQNA_REDIS_VECTOR_INSIGHT_PORT=8001 -export CHATQNA_REDIS_VECTOR_PORT=6379 -export CHATQNA_TEI_EMBEDDING_PORT=18090 -export CHATQNA_TEI_RERANKING_PORT=18808 -export CHATQNA_TGI_SERVICE_PORT=18008 - -export CHATQNA_BACKEND_SERVICE_ENDPOINT="http://${HOST_IP_EXTERNAL}:${CHATQNA_BACKEND_SERVICE_PORT}/v1/chatqna" -export CHATQNA_BACKEND_SERVICE_IP=${HOST_IP} -export CHATQNA_DATAPREP_DELETE_FILE_ENDPOINT="http://${HOST_IP_EXTERNAL}:${CHATQNA_REDIS_DATAPREP_PORT}/v1/dataprep/delete" -export CHATQNA_DATAPREP_GET_FILE_ENDPOINT="http://${HOST_IP_EXTERNAL}:${CHATQNA_REDIS_DATAPREP_PORT}/v1/dataprep/get" -export CHATQNA_DATAPREP_SERVICE_ENDPOINT="http://${HOST_IP_EXTERNAL}:${CHATQNA_REDIS_DATAPREP_PORT}/v1/dataprep/ingest" -export CHATQNA_EMBEDDING_SERVICE_HOST_IP=${HOST_IP} -export CHATQNA_FRONTEND_SERVICE_IP=${HOST_IP} -export CHATQNA_LLM_SERVICE_HOST_IP=${HOST_IP} -export CHATQNA_LLM_ENDPOINT="http://${HOST_IP}:${CHATQNA_TGI_SERVICE_PORT}" -export CHATQNA_MEGA_SERVICE_HOST_IP=${HOST_IP} -export CHATQNA_REDIS_URL="redis://${HOST_IP}:${CHATQNA_REDIS_VECTOR_PORT}" -export CHATQNA_RERANK_SERVICE_HOST_IP=${HOST_IP} -export CHATQNA_RETRIEVER_SERVICE_HOST_IP=${HOST_IP} -export CHATQNA_TEI_EMBEDDING_ENDPOINT="http://${HOST_IP}:${CHATQNA_TEI_EMBEDDING_PORT}" - -export CHATQNA_BACKEND_SERVICE_NAME=chatqna -export CHATQNA_INDEX_NAME="rag-redis" -export FAQGen_COMPONENT_NAME="OpeaFaqGenTgi" +source $WORKPATH/docker_compose/amd/gpu/rocm/set_env_faqgen.sh export PATH="~/miniconda3/bin:$PATH" diff --git 
a/ChatQnA/tests/test_compose_faqgen_on_xeon.sh b/ChatQnA/tests/test_compose_faqgen_on_xeon.sh index dc42798732..e9a15842ab 100644 --- a/ChatQnA/tests/test_compose_faqgen_on_xeon.sh +++ b/ChatQnA/tests/test_compose_faqgen_on_xeon.sh @@ -37,26 +37,16 @@ function build_docker_images() { function start_services() { cd $WORKPATH/docker_compose/intel/cpu/xeon - export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5" - export RERANK_MODEL_ID="BAAI/bge-reranker-base" - export LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct" - export INDEX_NAME="rag-redis" - export host_ip=${ip_address} - export LLM_ENDPOINT_PORT=8010 export LLM_SERVER_PORT=9001 - export CHATQNA_BACKEND_PORT=8888 export CHATQNA_REDIS_VECTOR_PORT=6377 export CHATQNA_REDIS_VECTOR_INSIGHT_PORT=8006 export CHATQNA_FRONTEND_SERVICE_PORT=5175 - export NGINX_PORT=80 - export FAQGen_COMPONENT_NAME="OpeaFaqGenvLLM" - export LLM_ENDPOINT="http://${host_ip}:${LLM_ENDPOINT_PORT}" - export HF_TOKEN=${HF_TOKEN} export VLLM_SKIP_WARMUP=true export LOGFLAG=True export http_proxy=${http_proxy} export https_proxy=${https_proxy} export no_proxy="${ip_address},redis-vector-db,dataprep-redis-service,tei-embedding-service,retriever,tei-reranking-service,tgi-service,vllm-service,guardrails,llm-faqgen,chatqna-xeon-backend-server,chatqna-xeon-ui-server,chatqna-xeon-nginx-server" + source set_env.sh # Start Docker Containers docker compose -f compose_faqgen.yaml up -d > ${LOG_PATH}/start_services_with_compose.log diff --git a/ChatQnA/tests/test_compose_faqgen_tgi_on_gaudi.sh b/ChatQnA/tests/test_compose_faqgen_tgi_on_gaudi.sh index 8eef9c6040..563de7eb6a 100644 --- a/ChatQnA/tests/test_compose_faqgen_tgi_on_gaudi.sh +++ b/ChatQnA/tests/test_compose_faqgen_tgi_on_gaudi.sh @@ -33,25 +33,8 @@ function build_docker_images() { function start_services() { cd $WORKPATH/docker_compose/intel/hpu/gaudi - export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5" - export RERANK_MODEL_ID="BAAI/bge-reranker-base" - export 
LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct" - export INDEX_NAME="rag-redis" - export host_ip=${ip_address} - export LLM_ENDPOINT_PORT=8010 - export LLM_SERVER_PORT=9001 - export CHATQNA_BACKEND_PORT=8888 - export CHATQNA_REDIS_VECTOR_PORT=6377 - export CHATQNA_REDIS_VECTOR_INSIGHT_PORT=8006 - export CHATQNA_FRONTEND_SERVICE_PORT=5175 - export NGINX_PORT=80 export FAQGen_COMPONENT_NAME="OpeaFaqGenTgi" - export LLM_ENDPOINT="http://${host_ip}:${LLM_ENDPOINT_PORT}" - export HF_TOKEN=${HF_TOKEN} - export LOGFLAG=True - export http_proxy=${http_proxy} - export https_proxy=${https_proxy} - export no_proxy="${ip_address},redis-vector-db,dataprep-redis-service,tei-embedding-service,retriever,tei-reranking-service,tgi-service,vllm-service,guardrails,llm-faqgen,chatqna-gaudi-backend-server,chatqna-gaudi-ui-server,chatqna-gaudi-nginx-server" + source set_env_faqgen.sh # Start Docker Containers docker compose -f compose_faqgen_tgi.yaml up -d > ${LOG_PATH}/start_services_with_compose.log diff --git a/ChatQnA/tests/test_compose_faqgen_tgi_on_xeon.sh b/ChatQnA/tests/test_compose_faqgen_tgi_on_xeon.sh index d4140a3ab1..44cdc03ceb 100644 --- a/ChatQnA/tests/test_compose_faqgen_tgi_on_xeon.sh +++ b/ChatQnA/tests/test_compose_faqgen_tgi_on_xeon.sh @@ -37,25 +37,16 @@ function build_docker_images() { function start_services() { cd $WORKPATH/docker_compose/intel/cpu/xeon - export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5" - export RERANK_MODEL_ID="BAAI/bge-reranker-base" - export LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct" - export INDEX_NAME="rag-redis" - export host_ip=${ip_address} - export LLM_ENDPOINT_PORT=8010 export LLM_SERVER_PORT=9001 - export CHATQNA_BACKEND_PORT=8888 export CHATQNA_REDIS_VECTOR_PORT=6377 export CHATQNA_REDIS_VECTOR_INSIGHT_PORT=8006 export CHATQNA_FRONTEND_SERVICE_PORT=5175 - export NGINX_PORT=80 export FAQGen_COMPONENT_NAME="OpeaFaqGenTgi" - export LLM_ENDPOINT="http://${host_ip}:${LLM_ENDPOINT_PORT}" - export HF_TOKEN=${HF_TOKEN} export 
LOGFLAG=True export http_proxy=${http_proxy} export https_proxy=${https_proxy} export no_proxy="${ip_address},redis-vector-db,dataprep-redis-service,tei-embedding-service,retriever,tei-reranking-service,tgi-service,vllm-service,guardrails,llm-faqgen,chatqna-xeon-backend-server,chatqna-xeon-ui-server,chatqna-xeon-nginx-server" + source set_env.sh # Start Docker Containers docker compose -f compose_faqgen_tgi.yaml up -d > ${LOG_PATH}/start_services_with_compose.log diff --git a/ChatQnA/tests/test_compose_faqgen_vllm_on_rocm.sh b/ChatQnA/tests/test_compose_faqgen_vllm_on_rocm.sh index 83e71da34e..774aca814d 100644 --- a/ChatQnA/tests/test_compose_faqgen_vllm_on_rocm.sh +++ b/ChatQnA/tests/test_compose_faqgen_vllm_on_rocm.sh @@ -14,41 +14,7 @@ WORKPATH=$(dirname "$PWD") LOG_PATH="$WORKPATH/tests" ip_address=$(hostname -I | awk '{print $1}') -export HOST_IP=${ip_address} -export HOST_IP_EXTERNAL=${ip_address} - -export CHATQNA_EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5" -export CHATQNA_HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN} -export CHATQNA_LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct" -export CHATQNA_RERANK_MODEL_ID="BAAI/bge-reranker-base" - -export CHATQNA_BACKEND_SERVICE_PORT=8888 -export CHATQNA_FRONTEND_SERVICE_PORT=5173 -export CHATQNA_LLM_FAQGEN_PORT=18011 -export CHATQNA_NGINX_PORT=80 -export CHATQNA_REDIS_DATAPREP_PORT=18103 -export CHATQNA_REDIS_RETRIEVER_PORT=7000 -export CHATQNA_REDIS_VECTOR_INSIGHT_PORT=8001 -export CHATQNA_REDIS_VECTOR_PORT=6379 -export CHATQNA_TEI_EMBEDDING_PORT=18090 -export CHATQNA_TEI_RERANKING_PORT=18808 -export CHATQNA_VLLM_SERVICE_PORT=18008 - -export CHATQNA_BACKEND_SERVICE_ENDPOINT="http://${HOST_IP_EXTERNAL}:${CHATQNA_BACKEND_SERVICE_PORT}/v1/chatqna" -export CHATQNA_BACKEND_SERVICE_IP=${HOST_IP_EXTERNAL} -export CHATQNA_DATAPREP_DELETE_FILE_ENDPOINT="http://${HOST_IP_EXTERNAL}:${CHATQNA_REDIS_DATAPREP_PORT}/v1/dataprep/delete" -export 
CHATQNA_DATAPREP_GET_FILE_ENDPOINT="http://${HOST_IP_EXTERNAL}:${CHATQNA_REDIS_DATAPREP_PORT}/v1/dataprep/get" -export CHATQNA_DATAPREP_SERVICE_ENDPOINT="http://${HOST_IP_EXTERNAL}:${CHATQNA_REDIS_DATAPREP_PORT}/v1/dataprep/ingest" -export CHATQNA_FRONTEND_SERVICE_IP=${HOST_IP} -export CHATQNA_MEGA_SERVICE_HOST_IP=${HOST_IP} -export CHATQNA_REDIS_URL="redis://${HOST_IP}:${CHATQNA_REDIS_VECTOR_PORT}" -export CHATQNA_TEI_EMBEDDING_ENDPOINT="http://${HOST_IP}:${CHATQNA_TEI_EMBEDDING_PORT}" -export LLM_ENDPOINT="http://${HOST_IP}:${CHATQNA_VLLM_SERVICE_PORT}" - -export CHATQNA_BACKEND_SERVICE_NAME=chatqna -export CHATQNA_INDEX_NAME="rag-redis" -export CHATQNA_TYPE="CHATQNA_FAQGEN" -export FAQGen_COMPONENT_NAME="OpeaFaqGenvLLM" +source $WORKPATH/docker_compose/amd/gpu/rocm/set_env_faqgen_vllm.sh function build_docker_images() { opea_branch=${opea_branch:-"main"} diff --git a/ChatQnA/tests/test_compose_guardrails_on_gaudi.sh b/ChatQnA/tests/test_compose_guardrails_on_gaudi.sh index f9057f6ec0..c24a0c537f 100644 --- a/ChatQnA/tests/test_compose_guardrails_on_gaudi.sh +++ b/ChatQnA/tests/test_compose_guardrails_on_gaudi.sh @@ -2,7 +2,7 @@ # Copyright (C) 2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 -set -e +set -xe IMAGE_REPO=${IMAGE_REPO:-"opea"} IMAGE_TAG=${IMAGE_TAG:-"latest"} echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}" @@ -36,14 +36,8 @@ function build_docker_images() { function start_services() { cd $WORKPATH/docker_compose/intel/hpu/gaudi - export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5" - export RERANK_MODEL_ID="BAAI/bge-reranker-base" - export LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct" - export NUM_CARDS=1 - export INDEX_NAME="rag-redis" - export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN} - export host_ip=${ip_address} export GURADRAILS_MODEL_ID="meta-llama/Meta-Llama-Guard-2-8B" + source set_env_faqgen.sh # Start Docker Containers docker compose -f compose_guardrails.yaml up -d > ${LOG_PATH}/start_services_with_compose.log diff 
--git a/ChatQnA/tests/test_compose_mariadb_on_xeon.sh b/ChatQnA/tests/test_compose_mariadb_on_xeon.sh index 412e32626a..61581bfd28 100644 --- a/ChatQnA/tests/test_compose_mariadb_on_xeon.sh +++ b/ChatQnA/tests/test_compose_mariadb_on_xeon.sh @@ -2,7 +2,7 @@ # Copyright (C) 2025 MariaDB Foundation # SPDX-License-Identifier: Apache-2.0 -set -e +set -xe IMAGE_REPO=${IMAGE_REPO:-"opea"} IMAGE_TAG=${IMAGE_TAG:-"latest"} echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}" @@ -39,14 +39,8 @@ function build_docker_images() { function start_services() { cd $WORKPATH/docker_compose/intel/cpu/xeon - export MARIADB_DATABASE="vectordb" - export MARIADB_USER="chatqna" export MARIADB_PASSWORD="test" - export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5" - export RERANK_MODEL_ID="BAAI/bge-reranker-base" - export LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct" - export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN} - export host_ip=${ip_address} + source set_env_mariadb.sh # Start Docker Containers docker compose -f compose_mariadb.yaml up -d > ${LOG_PATH}/start_services_with_compose.log diff --git a/ChatQnA/tests/test_compose_milvus_on_xeon.sh b/ChatQnA/tests/test_compose_milvus_on_xeon.sh index 47a5b43ddc..5316fd08fc 100644 --- a/ChatQnA/tests/test_compose_milvus_on_xeon.sh +++ b/ChatQnA/tests/test_compose_milvus_on_xeon.sh @@ -2,7 +2,7 @@ # Copyright (C) 2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 -set -e +set -xe IMAGE_REPO=${IMAGE_REPO:-"opea"} IMAGE_TAG=${IMAGE_TAG:-"latest"} echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}" @@ -39,11 +39,8 @@ function build_docker_images() { } function start_services() { cd $WORKPATH/docker_compose/intel/cpu/xeon/ - export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5" - export RERANK_MODEL_ID="BAAI/bge-reranker-base" - export LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct" - export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN} export LOGFLAG=true + source set_env.sh # Start Docker Containers docker compose -f compose_milvus.yaml up 
-d > ${LOG_PATH}/start_services_with_compose.log diff --git a/ChatQnA/tests/test_compose_on_gaudi.sh b/ChatQnA/tests/test_compose_on_gaudi.sh index 144f541907..857d25ce05 100644 --- a/ChatQnA/tests/test_compose_on_gaudi.sh +++ b/ChatQnA/tests/test_compose_on_gaudi.sh @@ -2,7 +2,7 @@ # Copyright (C) 2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 -set -e +set -xe IMAGE_REPO=${IMAGE_REPO:-"opea"} IMAGE_TAG=${IMAGE_TAG:-"latest"} echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}" @@ -36,16 +36,10 @@ function build_docker_images() { function start_services() { cd $WORKPATH/docker_compose/intel/hpu/gaudi - export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5" - export RERANK_MODEL_ID="BAAI/bge-reranker-base" - export LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct" - export NUM_CARDS=1 - export INDEX_NAME="rag-redis" - export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN} + export NON_INTERACTIVE=true export host_ip=${ip_address} - export JAEGER_IP=$(ip route get 8.8.8.8 | grep -oP 'src \K[^ ]+') - export OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=grpc://$JAEGER_IP:4317 - export TELEMETRY_ENDPOINT=http://$JAEGER_IP:4318/v1/traces + export telemetry=yes + source set_env.sh # Start Docker Containers docker compose -f compose.yaml -f compose.telemetry.yaml up -d > ${LOG_PATH}/start_services_with_compose.log diff --git a/ChatQnA/tests/test_compose_on_rocm.sh b/ChatQnA/tests/test_compose_on_rocm.sh index 3ff91522c8..a36aecf68e 100644 --- a/ChatQnA/tests/test_compose_on_rocm.sh +++ b/ChatQnA/tests/test_compose_on_rocm.sh @@ -15,41 +15,7 @@ WORKPATH=$(dirname "$PWD") LOG_PATH="$WORKPATH/tests" ip_address=$(hostname -I | awk '{print $1}') -export HOST_IP=${ip_address} -export HOST_IP_EXTERNAL=${ip_address} - -export CHATQNA_EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5" -export CHATQNA_HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN} -export CHATQNA_LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct" -export CHATQNA_RERANK_MODEL_ID="BAAI/bge-reranker-base" - -export 
CHATQNA_BACKEND_SERVICE_PORT=8888 -export CHATQNA_FRONTEND_SERVICE_PORT=5173 -export CHATQNA_NGINX_PORT=80 -export CHATQNA_REDIS_DATAPREP_PORT=18103 -export CHATQNA_REDIS_RETRIEVER_PORT=7000 -export CHATQNA_REDIS_VECTOR_INSIGHT_PORT=8001 -export CHATQNA_REDIS_VECTOR_PORT=6379 -export CHATQNA_TEI_EMBEDDING_PORT=18090 -export CHATQNA_TEI_RERANKING_PORT=18808 -export CHATQNA_TGI_SERVICE_PORT=18008 - -export CHATQNA_BACKEND_SERVICE_ENDPOINT="http://${HOST_IP_EXTERNAL}:${CHATQNA_BACKEND_SERVICE_PORT}/v1/chatqna" -export CHATQNA_BACKEND_SERVICE_IP=${HOST_IP} -export CHATQNA_DATAPREP_DELETE_FILE_ENDPOINT="http://${HOST_IP_EXTERNAL}:${CHATQNA_REDIS_DATAPREP_PORT}/v1/dataprep/delete" -export CHATQNA_DATAPREP_GET_FILE_ENDPOINT="http://${HOST_IP_EXTERNAL}:${CHATQNA_REDIS_DATAPREP_PORT}/v1/dataprep/get" -export CHATQNA_DATAPREP_SERVICE_ENDPOINT="http://${HOST_IP_EXTERNAL}:${CHATQNA_REDIS_DATAPREP_PORT}/v1/dataprep/ingest" -export CHATQNA_EMBEDDING_SERVICE_HOST_IP=${HOST_IP} -export CHATQNA_FRONTEND_SERVICE_IP=${HOST_IP} -export CHATQNA_LLM_SERVICE_HOST_IP=${HOST_IP} -export CHATQNA_MEGA_SERVICE_HOST_IP=${HOST_IP} -export CHATQNA_REDIS_URL="redis://${HOST_IP}:${CHATQNA_REDIS_VECTOR_PORT}" -export CHATQNA_RERANK_SERVICE_HOST_IP=${HOST_IP} -export CHATQNA_RETRIEVER_SERVICE_HOST_IP=${HOST_IP} -export CHATQNA_TEI_EMBEDDING_ENDPOINT="http://${HOST_IP}:${CHATQNA_TEI_EMBEDDING_PORT}" - -export CHATQNA_BACKEND_SERVICE_NAME=chatqna -export CHATQNA_INDEX_NAME="rag-redis" +source $WORKPATH/docker_compose/amd/gpu/rocm/set_env.sh export PATH="~/miniconda3/bin:$PATH" diff --git a/ChatQnA/tests/test_compose_on_xeon.sh b/ChatQnA/tests/test_compose_on_xeon.sh index 38226ec9be..89357285c8 100644 --- a/ChatQnA/tests/test_compose_on_xeon.sh +++ b/ChatQnA/tests/test_compose_on_xeon.sh @@ -2,7 +2,7 @@ # Copyright (C) 2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 -set -e +set -xe IMAGE_REPO=${IMAGE_REPO:-"opea"} IMAGE_TAG=${IMAGE_TAG:-"latest"} echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}" 
@@ -40,15 +40,7 @@ function build_docker_images() { function start_services() { cd $WORKPATH/docker_compose/intel/cpu/xeon - export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5" - export RERANK_MODEL_ID="BAAI/bge-reranker-base" - export LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct" - export INDEX_NAME="rag-redis" - export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN} - export host_ip=${ip_address} - export JAEGER_IP=$(ip route get 8.8.8.8 | grep -oP 'src \K[^ ]+') - export OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=grpc://$JAEGER_IP:4317 - export TELEMETRY_ENDPOINT=http://$JAEGER_IP:4318/v1/traces + source set_env.sh # Start Docker Containers docker compose -f compose.yaml -f compose.telemetry.yaml up -d > ${LOG_PATH}/start_services_with_compose.log diff --git a/ChatQnA/tests/test_compose_pinecone_on_xeon.sh b/ChatQnA/tests/test_compose_pinecone_on_xeon.sh index 98bfd21368..e02b13637d 100755 --- a/ChatQnA/tests/test_compose_pinecone_on_xeon.sh +++ b/ChatQnA/tests/test_compose_pinecone_on_xeon.sh @@ -2,7 +2,7 @@ # Copyright (C) 2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 -set -e +set -xe IMAGE_REPO=${IMAGE_REPO:-"opea"} IMAGE_TAG=${IMAGE_TAG:-"latest"} echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}" @@ -41,14 +41,11 @@ function build_docker_images() { function start_services() { cd $WORKPATH/docker_compose/intel/cpu/xeon/ export no_proxy=${no_proxy},${ip_address} - export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5" - export RERANK_MODEL_ID="BAAI/bge-reranker-base" - export LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct" export PINECONE_API_KEY=${PINECONE_KEY_LANGCHAIN_TEST} export PINECONE_INDEX_NAME="langchain-test" export INDEX_NAME="langchain-test" - export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN} export LOGFLAG=true + source set_env.sh # Start Docker Containers docker compose -f compose_pinecone.yaml up -d > ${LOG_PATH}/start_services_with_compose.log diff --git a/ChatQnA/tests/test_compose_qdrant_on_xeon.sh 
b/ChatQnA/tests/test_compose_qdrant_on_xeon.sh index f2a30be1e7..594deec288 100644 --- a/ChatQnA/tests/test_compose_qdrant_on_xeon.sh +++ b/ChatQnA/tests/test_compose_qdrant_on_xeon.sh @@ -2,7 +2,7 @@ # Copyright (C) 2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 -set -e +set -xe IMAGE_REPO=${IMAGE_REPO:-"opea"} IMAGE_TAG=${IMAGE_TAG:-"latest"} echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}" @@ -40,11 +40,8 @@ function build_docker_images() { function start_services() { cd $WORKPATH/docker_compose/intel/cpu/xeon - export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5" - export RERANK_MODEL_ID="BAAI/bge-reranker-base" - export LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct" export INDEX_NAME="rag-qdrant" - export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN} + source set_env.sh sed -i "s/backend_address/$ip_address/g" $WORKPATH/ui/svelte/.env diff --git a/ChatQnA/tests/test_compose_tgi_on_gaudi.sh b/ChatQnA/tests/test_compose_tgi_on_gaudi.sh index b334fc35c8..7ab0565a3c 100644 --- a/ChatQnA/tests/test_compose_tgi_on_gaudi.sh +++ b/ChatQnA/tests/test_compose_tgi_on_gaudi.sh @@ -2,7 +2,7 @@ # Copyright (C) 2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 -set -e +set -xe IMAGE_REPO=${IMAGE_REPO:-"opea"} IMAGE_TAG=${IMAGE_TAG:-"latest"} echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}" @@ -32,15 +32,10 @@ function build_docker_images() { function start_services() { cd $WORKPATH/docker_compose/intel/hpu/gaudi - export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5" - export RERANK_MODEL_ID="BAAI/bge-reranker-base" - export LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct" - export NUM_CARDS=1 - export INDEX_NAME="rag-redis" - export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN} - export JAEGER_IP=$(ip route get 8.8.8.8 | grep -oP 'src \K[^ ]+') - export OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=grpc://$JAEGER_IP:4317 - export TELEMETRY_ENDPOINT=http://$JAEGER_IP:4318/v1/traces + export NON_INTERACTIVE=true + export host_ip=${ip_address} + export telemetry=yes 
+ source set_env.sh # Start Docker Containers docker compose -f compose_tgi.yaml -f compose_tgi.telemetry.yaml up -d > ${LOG_PATH}/start_services_with_compose.log diff --git a/ChatQnA/tests/test_compose_tgi_on_xeon.sh b/ChatQnA/tests/test_compose_tgi_on_xeon.sh index 12c9552ca5..c00fa861aa 100644 --- a/ChatQnA/tests/test_compose_tgi_on_xeon.sh +++ b/ChatQnA/tests/test_compose_tgi_on_xeon.sh @@ -2,7 +2,7 @@ # Copyright (C) 2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 -set -e +set -xe IMAGE_REPO=${IMAGE_REPO:-"opea"} IMAGE_TAG=${IMAGE_TAG:-"latest"} echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}" @@ -33,14 +33,7 @@ function build_docker_images() { function start_services() { cd $WORKPATH/docker_compose/intel/cpu/xeon - export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5" - export RERANK_MODEL_ID="BAAI/bge-reranker-base" - export LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct" - export INDEX_NAME="rag-redis" - export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN} - export JAEGER_IP=$(ip route get 8.8.8.8 | grep -oP 'src \K[^ ]+') - export OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=grpc://$JAEGER_IP:4317 - export TELEMETRY_ENDPOINT=http://$JAEGER_IP:4318/v1/traces + source set_env.sh # Start Docker Containers docker compose -f compose_tgi.yaml -f compose_tgi.telemetry.yaml up -d > ${LOG_PATH}/start_services_with_compose.log diff --git a/ChatQnA/tests/test_compose_vllm_on_rocm.sh b/ChatQnA/tests/test_compose_vllm_on_rocm.sh index 622c90ece9..992c4f4aac 100644 --- a/ChatQnA/tests/test_compose_vllm_on_rocm.sh +++ b/ChatQnA/tests/test_compose_vllm_on_rocm.sh @@ -2,7 +2,7 @@ # Copyright (C) 2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 -set -e +set -xe IMAGE_REPO=${IMAGE_REPO:-"opea"} IMAGE_TAG=${IMAGE_TAG:-"latest"} echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}" @@ -14,42 +14,7 @@ WORKPATH=$(dirname "$PWD") LOG_PATH="$WORKPATH/tests" ip_address=$(hostname -I | awk '{print $1}') -export HOST_IP=${ip_address} -export HOST_IP_EXTERNAL=${ip_address} - -export 
CHATQNA_EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5" -export CHATQNA_HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN} -export CHATQNA_LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct" -export CHATQNA_RERANK_MODEL_ID="BAAI/bge-reranker-base" - -export CHATQNA_BACKEND_SERVICE_PORT=8888 -export CHATQNA_FRONTEND_SERVICE_PORT=5173 -export CHATQNA_NGINX_PORT=80 -export CHATQNA_REDIS_DATAPREP_PORT=18103 -export CHATQNA_REDIS_RETRIEVER_PORT=7000 -export CHATQNA_REDIS_VECTOR_INSIGHT_PORT=8001 -export CHATQNA_REDIS_VECTOR_PORT=6379 -export CHATQNA_TEI_EMBEDDING_PORT=18090 -export CHATQNA_TEI_RERANKING_PORT=18808 -export CHATQNA_VLLM_SERVICE_PORT=18008 - -export CHATQNA_BACKEND_SERVICE_ENDPOINT="http://${HOST_IP_EXTERNAL}:${CHATQNA_BACKEND_SERVICE_PORT}/v1/chatqna" -export CHATQNA_BACKEND_SERVICE_IP=${HOST_IP_EXTERNAL} -export CHATQNA_DATAPREP_DELETE_FILE_ENDPOINT="http://${HOST_IP_EXTERNAL}:${CHATQNA_REDIS_DATAPREP_PORT}/v1/dataprep/delete" -export CHATQNA_DATAPREP_GET_FILE_ENDPOINT="http://${HOST_IP_EXTERNAL}:${CHATQNA_REDIS_DATAPREP_PORT}/v1/dataprep/get" -export CHATQNA_DATAPREP_SERVICE_ENDPOINT="http://${HOST_IP_EXTERNAL}:${CHATQNA_REDIS_DATAPREP_PORT}/v1/dataprep/ingest" -export CHATQNA_EMBEDDING_SERVICE_HOST_IP=${HOST_IP} -export CHATQNA_FRONTEND_SERVICE_IP=${HOST_IP} -export CHATQNA_LLM_SERVICE_HOST_IP=${HOST_IP} -export CHATQNA_MEGA_SERVICE_HOST_IP=${HOST_IP} -export CHATQNA_REDIS_URL="redis://${HOST_IP}:${CHATQNA_REDIS_VECTOR_PORT}" -export CHATQNA_RERANK_SERVICE_HOST_IP=${HOST_IP} -export CHATQNA_RETRIEVER_SERVICE_HOST_IP=${HOST_IP} -export CHATQNA_TEI_EMBEDDING_ENDPOINT="http://${HOST_IP}:${CHATQNA_TEI_EMBEDDING_PORT}" - -export CHATQNA_BACKEND_SERVICE_NAME=chatqna -export CHATQNA_INDEX_NAME="rag-redis" - +source $WORKPATH/docker_compose/amd/gpu/rocm/set_env_vllm.sh function build_docker_images() { opea_branch=${opea_branch:-"main"} diff --git a/ChatQnA/tests/test_compose_without_rerank_on_gaudi.sh b/ChatQnA/tests/test_compose_without_rerank_on_gaudi.sh index 
7d6837402f..a1ee6922c0 100644 --- a/ChatQnA/tests/test_compose_without_rerank_on_gaudi.sh +++ b/ChatQnA/tests/test_compose_without_rerank_on_gaudi.sh @@ -2,7 +2,7 @@ # Copyright (C) 2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 -set -e +set -xe IMAGE_REPO=${IMAGE_REPO:-"opea"} IMAGE_TAG=${IMAGE_TAG:-"latest"} echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}" @@ -36,11 +36,8 @@ function build_docker_images() { function start_services() { cd $WORKPATH/docker_compose/intel/hpu/gaudi - export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5" - export LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct" - export NUM_CARDS=1 - export INDEX_NAME="rag-redis" - export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN} + export NON_INTERACTIVE=true + source set_env.sh # Start Docker Containers docker compose -f compose_without_rerank.yaml up -d > ${LOG_PATH}/start_services_with_compose.log diff --git a/ChatQnA/tests/test_compose_without_rerank_on_xeon.sh b/ChatQnA/tests/test_compose_without_rerank_on_xeon.sh index 2d79b0e7a2..256d9de230 100644 --- a/ChatQnA/tests/test_compose_without_rerank_on_xeon.sh +++ b/ChatQnA/tests/test_compose_without_rerank_on_xeon.sh @@ -2,7 +2,7 @@ # Copyright (C) 2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 -set -e +set -xe IMAGE_REPO=${IMAGE_REPO:-"opea"} IMAGE_TAG=${IMAGE_TAG:-"latest"} echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}" @@ -41,10 +41,7 @@ function build_docker_images() { function start_services() { cd $WORKPATH/docker_compose/intel/cpu/xeon - export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5" - export LLM_MODEL_ID="meta-llama/Meta-Llama-3-8B-Instruct" - export INDEX_NAME="rag-redis" - export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN} + source set_env.sh # Start Docker Containers docker compose -f compose_without_rerank.yaml up -d > ${LOG_PATH}/start_services_with_compose.log