Skip to content

Commit 11a19ba

Browse files
authored
Merge branch 'main' into searchqna_fix
2 parents 618fd75 + 09d93ec commit 11a19ba

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

51 files changed

+626
-524
lines changed

.github/workflows/_run-docker-compose.yml

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -204,6 +204,10 @@ jobs:
204204
if [[ ! -z "$cid" ]]; then docker stop $cid && docker rm $cid && sleep 1s; fi
205205
206206
echo "Cleaning up images ..."
207+
df -h
208+
sleep 1
209+
docker system df
210+
sleep 1
207211
if [[ "${{ inputs.hardware }}" == "xeon"* ]]; then
208212
docker system prune -a -f
209213
else
@@ -213,7 +217,13 @@ jobs:
213217
docker images --filter reference="opea/comps-base" -q | xargs -r docker rmi && sleep 1s
214218
docker system prune -f
215219
fi
220+
sleep 5
216221
docker images
222+
sleep 1
223+
df -h
224+
sleep 1
225+
docker system df
226+
sleep 1
217227
218228
- name: Publish pipeline artifact
219229
if: ${{ !cancelled() }}

AvatarChatbot/docker_compose/amd/gpu/rocm/set_env.sh

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -41,7 +41,7 @@ export FACE="/home/user/comps/animation/src/assets/img/avatar5.png"
4141
# export AUDIO='assets/audio/eg3_ref.wav' # audio file path is optional, will use base64str in the post request as input if is 'None'
4242
export AUDIO='None'
4343
export FACESIZE=96
44-
export OUTFILE="/outputs/result.mp4"
44+
export OUTFILE="./outputs/result.mp4"
4545
export GFPGAN_MODEL_VERSION=1.4 # latest version, can roll back to v1.3 if needed
4646
export UPSCALE_FACTOR=1
47-
export FPS=10
47+
export FPS=5

AvatarChatbot/docker_compose/intel/cpu/xeon/set_env.sh

Lines changed: 29 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -5,3 +5,32 @@
55
pushd "../../../../../" > /dev/null
66
source .set_env.sh
77
popd > /dev/null
8+
9+
export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
10+
export host_ip=$(hostname -I | awk '{print $1}')
11+
export LLM_MODEL_ID=Intel/neural-chat-7b-v3-3
12+
export WAV2LIP_ENDPOINT=http://$host_ip:7860
13+
export MEGA_SERVICE_HOST_IP=${host_ip}
14+
export WHISPER_SERVER_HOST_IP=${host_ip}
15+
export WHISPER_SERVER_PORT=7066
16+
export SPEECHT5_SERVER_HOST_IP=${host_ip}
17+
export SPEECHT5_SERVER_PORT=7055
18+
export LLM_SERVER_HOST_IP=${host_ip}
19+
export LLM_SERVER_PORT=3006
20+
export ANIMATION_SERVICE_HOST_IP=${host_ip}
21+
export ANIMATION_SERVICE_PORT=3008
22+
23+
export MEGA_SERVICE_PORT=8888
24+
25+
export DEVICE="cpu"
26+
export WAV2LIP_PORT=7860
27+
export INFERENCE_MODE='wav2lip+gfpgan'
28+
export CHECKPOINT_PATH='/usr/local/lib/python3.11/site-packages/Wav2Lip/checkpoints/wav2lip_gan.pth'
29+
export FACE="/home/user/comps/animation/src/assets/img/avatar5.png"
30+
# export AUDIO='assets/audio/eg3_ref.wav' # audio file path is optional, will use base64str in the post request as input if is 'None'
31+
export AUDIO='None'
32+
export FACESIZE=96
33+
export OUTFILE="/outputs/result.mp4"
34+
export GFPGAN_MODEL_VERSION=1.4 # latest version, can roll back to v1.3 if needed
35+
export UPSCALE_FACTOR=1
36+
export FPS=10

AvatarChatbot/docker_compose/intel/hpu/gaudi/set_env.sh

Lines changed: 32 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -5,3 +5,35 @@
55
pushd "../../../../../" > /dev/null
66
source .set_env.sh
77
popd > /dev/null
8+
9+
export HUGGINGFACEHUB_API_TOKEN=$HUGGINGFACEHUB_API_TOKEN
10+
export host_ip=$(hostname -I | awk '{print $1}')
11+
12+
export LLM_MODEL_ID=Intel/neural-chat-7b-v3-3
13+
14+
export WAV2LIP_ENDPOINT=http://$host_ip:7860
15+
16+
export MEGA_SERVICE_HOST_IP=${host_ip}
17+
export WHISPER_SERVER_HOST_IP=${host_ip}
18+
export WHISPER_SERVER_PORT=7066
19+
export SPEECHT5_SERVER_HOST_IP=${host_ip}
20+
export SPEECHT5_SERVER_PORT=7055
21+
export LLM_SERVER_HOST_IP=${host_ip}
22+
export LLM_SERVER_PORT=3006
23+
export ANIMATION_SERVICE_HOST_IP=${host_ip}
24+
export ANIMATION_SERVICE_PORT=3008
25+
26+
export MEGA_SERVICE_PORT=8888
27+
28+
export DEVICE="hpu"
29+
export WAV2LIP_PORT=7860
30+
export INFERENCE_MODE='wav2lip+gfpgan'
31+
export CHECKPOINT_PATH='/usr/local/lib/python3.10/dist-packages/Wav2Lip/checkpoints/wav2lip_gan.pth'
32+
export FACE="/home/user/comps/animation/src/assets/img/avatar1.jpg"
33+
# export AUDIO='assets/audio/eg3_ref.wav' # audio file path is optional, will use base64str in the post request as input if is 'None'
34+
export AUDIO='None'
35+
export FACESIZE=96
36+
export OUTFILE="/outputs/result.mp4"
37+
export GFPGAN_MODEL_VERSION=1.4 # latest version, can roll back to v1.3 if needed
38+
export UPSCALE_FACTOR=1
39+
export FPS=10

AvatarChatbot/tests/README.md

Lines changed: 27 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,27 @@
1+
# AvatarChatbot E2E test scripts
2+
3+
## Set the required environment variable
4+
5+
```bash
6+
export HUGGINGFACEHUB_API_TOKEN="Your_Huggingface_API_Token"
7+
```
8+
9+
## Run test
10+
11+
On Intel Xeon with TGI:
12+
13+
```bash
14+
bash test_compose_on_xeon.sh
15+
```
16+
17+
On Intel Gaudi with TGI:
18+
19+
```bash
20+
bash test_compose_on_gaudi.sh
21+
```
22+
23+
On AMD ROCm with TGI:
24+
25+
```bash
26+
bash test_compose_on_rocm.sh
27+
```

AvatarChatbot/tests/test_compose_on_gaudi.sh

Lines changed: 1 addition & 31 deletions
Original file line numberDiff line numberDiff line change
@@ -45,37 +45,7 @@ function build_docker_images() {
4545
function start_services() {
4646
cd $WORKPATH/docker_compose/intel/hpu/gaudi
4747

48-
export HUGGINGFACEHUB_API_TOKEN=$HUGGINGFACEHUB_API_TOKEN
49-
export host_ip=$(hostname -I | awk '{print $1}')
50-
51-
export LLM_MODEL_ID=Intel/neural-chat-7b-v3-3
52-
53-
export WAV2LIP_ENDPOINT=http://$host_ip:7860
54-
55-
export MEGA_SERVICE_HOST_IP=${host_ip}
56-
export WHISPER_SERVER_HOST_IP=${host_ip}
57-
export WHISPER_SERVER_PORT=7066
58-
export SPEECHT5_SERVER_HOST_IP=${host_ip}
59-
export SPEECHT5_SERVER_PORT=7055
60-
export LLM_SERVER_HOST_IP=${host_ip}
61-
export LLM_SERVER_PORT=3006
62-
export ANIMATION_SERVICE_HOST_IP=${host_ip}
63-
export ANIMATION_SERVICE_PORT=3008
64-
65-
export MEGA_SERVICE_PORT=8888
66-
67-
export DEVICE="hpu"
68-
export WAV2LIP_PORT=7860
69-
export INFERENCE_MODE='wav2lip+gfpgan'
70-
export CHECKPOINT_PATH='/usr/local/lib/python3.10/dist-packages/Wav2Lip/checkpoints/wav2lip_gan.pth'
71-
export FACE="/home/user/comps/animation/src/assets/img/avatar1.jpg"
72-
# export AUDIO='assets/audio/eg3_ref.wav' # audio file path is optional, will use base64str in the post request as input if is 'None'
73-
export AUDIO='None'
74-
export FACESIZE=96
75-
export OUTFILE="/outputs/result.mp4"
76-
export GFPGAN_MODEL_VERSION=1.4 # latest version, can roll back to v1.3 if needed
77-
export UPSCALE_FACTOR=1
78-
export FPS=10
48+
source set_env.sh
7949

8050
# Start Docker Containers
8151
docker compose up -d > ${LOG_PATH}/start_services_with_compose.log

AvatarChatbot/tests/test_compose_on_rocm.sh

Lines changed: 2 additions & 42 deletions
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,7 @@
22
# Copyright (C) 2024 Intel Corporation
33
# SPDX-License-Identifier: Apache-2.0
44

5-
set -e
5+
set -xe
66
IMAGE_REPO=${IMAGE_REPO:-"opea"}
77
IMAGE_TAG=${IMAGE_TAG:-"latest"}
88
echo "REGISTRY=IMAGE_REPO=${IMAGE_REPO}"
@@ -42,48 +42,8 @@ function build_docker_images() {
4242

4343
function start_services() {
4444
cd $WORKPATH/docker_compose/amd/gpu/rocm
45-
46-
export HUGGINGFACEHUB_API_TOKEN=$HUGGINGFACEHUB_API_TOKEN
4745
export OPENAI_API_KEY=$OPENAI_API_KEY
48-
export host_ip=${ip_address}
49-
50-
export TGI_SERVICE_PORT=3006
51-
export TGI_LLM_ENDPOINT=http://${host_ip}:${TGI_SERVICE_PORT}
52-
export LLM_MODEL_ID="Intel/neural-chat-7b-v3-3"
53-
54-
export ASR_ENDPOINT=http://${host_ip}:7066
55-
export TTS_ENDPOINT=http://${host_ip}:7055
56-
export WAV2LIP_ENDPOINT=http://${host_ip}:7860
57-
58-
export MEGA_SERVICE_HOST_IP=${host_ip}
59-
export ASR_SERVICE_HOST_IP=${host_ip}
60-
export TTS_SERVICE_HOST_IP=${host_ip}
61-
export LLM_SERVICE_HOST_IP=${host_ip}
62-
export ANIMATION_SERVICE_HOST_IP=${host_ip}
63-
export WHISPER_SERVER_HOST_IP=${host_ip}
64-
export WHISPER_SERVER_PORT=7066
65-
66-
export SPEECHT5_SERVER_HOST_IP=${host_ip}
67-
export SPEECHT5_SERVER_PORT=7055
68-
69-
export MEGA_SERVICE_PORT=8888
70-
export ASR_SERVICE_PORT=3001
71-
export TTS_SERVICE_PORT=3002
72-
export LLM_SERVICE_PORT=3006
73-
export ANIMATION_SERVICE_PORT=3008
74-
75-
export DEVICE="cpu"
76-
export WAV2LIP_PORT=7860
77-
export INFERENCE_MODE='wav2lip+gfpgan'
78-
export CHECKPOINT_PATH='/usr/local/lib/python3.11/site-packages/Wav2Lip/checkpoints/wav2lip_gan.pth'
79-
export FACE="/home/user/comps/animation/src/assets/img/avatar5.png"
80-
# export AUDIO='assets/audio/eg3_ref.wav' # audio file path is optional, will use base64str in the post request as input if is 'None'
81-
export AUDIO='None'
82-
export FACESIZE=96
83-
export OUTFILE="./outputs/result.mp4"
84-
export GFPGAN_MODEL_VERSION=1.4 # latest version, can roll back to v1.3 if needed
85-
export UPSCALE_FACTOR=1
86-
export FPS=5
46+
source set_env.sh
8747

8848
# Start Docker Containers
8949
docker compose up -d --force-recreate

AvatarChatbot/tests/test_compose_on_xeon.sh

Lines changed: 1 addition & 31 deletions
Original file line numberDiff line numberDiff line change
@@ -45,37 +45,7 @@ function build_docker_images() {
4545
function start_services() {
4646
cd $WORKPATH/docker_compose/intel/cpu/xeon
4747

48-
export HUGGINGFACEHUB_API_TOKEN=$HUGGINGFACEHUB_API_TOKEN
49-
export host_ip=$(hostname -I | awk '{print $1}')
50-
51-
export LLM_MODEL_ID=Intel/neural-chat-7b-v3-3
52-
53-
export WAV2LIP_ENDPOINT=http://$host_ip:7860
54-
55-
export MEGA_SERVICE_HOST_IP=${host_ip}
56-
export WHISPER_SERVER_HOST_IP=${host_ip}
57-
export WHISPER_SERVER_PORT=7066
58-
export SPEECHT5_SERVER_HOST_IP=${host_ip}
59-
export SPEECHT5_SERVER_PORT=7055
60-
export LLM_SERVER_HOST_IP=${host_ip}
61-
export LLM_SERVER_PORT=3006
62-
export ANIMATION_SERVICE_HOST_IP=${host_ip}
63-
export ANIMATION_SERVICE_PORT=3008
64-
65-
export MEGA_SERVICE_PORT=8888
66-
67-
export DEVICE="cpu"
68-
export WAV2LIP_PORT=7860
69-
export INFERENCE_MODE='wav2lip+gfpgan'
70-
export CHECKPOINT_PATH='/usr/local/lib/python3.11/site-packages/Wav2Lip/checkpoints/wav2lip_gan.pth'
71-
export FACE="/home/user/comps/animation/src/assets/img/avatar5.png"
72-
# export AUDIO='assets/audio/eg3_ref.wav' # audio file path is optional, will use base64str in the post request as input if is 'None'
73-
export AUDIO='None'
74-
export FACESIZE=96
75-
export OUTFILE="/outputs/result.mp4"
76-
export GFPGAN_MODEL_VERSION=1.4 # latest version, can roll back to v1.3 if needed
77-
export UPSCALE_FACTOR=1
78-
export FPS=10
48+
source set_env.sh
7949

8050
# Start Docker Containers
8151
docker compose up -d

ChatQnA/tests/test_compose_mariadb_on_xeon.sh

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -140,7 +140,7 @@ function validate_megaservice() {
140140

141141
function stop_docker() {
142142
cd $WORKPATH/docker_compose/intel/cpu/xeon
143-
docker compose down
143+
docker compose -f compose_mariadb.yaml down
144144
}
145145

146146
function main() {

CodeGen/Dockerfile

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,8 +1,9 @@
11
# Copyright (C) 2024 Intel Corporation
22
# SPDX-License-Identifier: Apache-2.0
33

4+
ARG IMAGE_REPO=opea
45
ARG BASE_TAG=latest
5-
FROM opea/comps-base:$BASE_TAG
6+
FROM $IMAGE_REPO/comps-base:$BASE_TAG
67

78
COPY ./codegen.py $HOME/codegen.py
89

CodeGen/docker_image_build/build.yaml

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -5,6 +5,8 @@ services:
55
codegen:
66
build:
77
args:
8+
IMAGE_REPO: ${REGISTRY}
9+
BASE_TAG: ${TAG}
810
http_proxy: ${http_proxy}
911
https_proxy: ${https_proxy}
1012
no_proxy: ${no_proxy}
@@ -39,6 +41,7 @@ services:
3941
build:
4042
context: GenAIComps
4143
dockerfile: comps/third_parties/vllm/src/Dockerfile.amd_gpu
44+
extends: codegen
4245
image: ${REGISTRY:-opea}/vllm-rocm:${TAG:-latest}
4346
vllm:
4447
build:

CodeGen/tests/test_compose_on_gaudi.sh

Lines changed: 18 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -27,19 +27,13 @@ export no_proxy=${no_proxy},${ip_address}
2727

2828
function build_docker_images() {
2929
opea_branch=${opea_branch:-"main"}
30-
# If the opea_branch isn't main, replace the git clone branch in Dockerfile.
31-
if [[ "${opea_branch}" != "main" ]]; then
32-
cd $WORKPATH
33-
OLD_STRING="RUN git clone --depth 1 https://github.com/opea-project/GenAIComps.git"
34-
NEW_STRING="RUN git clone --depth 1 --branch ${opea_branch} https://github.com/opea-project/GenAIComps.git"
35-
find . -type f -name "Dockerfile*" | while read -r file; do
36-
echo "Processing file: $file"
37-
sed -i "s|$OLD_STRING|$NEW_STRING|g" "$file"
38-
done
39-
fi
4030

4131
cd $WORKPATH/docker_image_build
4232
git clone --depth 1 --branch ${opea_branch} https://github.com/opea-project/GenAIComps.git
33+
pushd GenAIComps
34+
echo "GenAIComps test commit is $(git rev-parse HEAD)"
35+
docker build --no-cache -t ${REGISTRY}/comps-base:${TAG} --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile .
36+
popd && sleep 1s
4337

4438
# Download Gaudi vllm of latest tag
4539
git clone https://github.com/HabanaAI/vllm-fork.git && cd vllm-fork
@@ -250,24 +244,36 @@ function main() {
250244
stop_docker "${docker_compose_profiles[${i}]}"
251245
done
252246

253-
# build docker images
247+
echo "::group::build_docker_images"
254248
if [[ "$IMAGE_REPO" == "opea" ]]; then build_docker_images; fi
249+
echo "::endgroup::"
255250

256251
# loop all profiles
257252
for ((i = 0; i < len_profiles; i++)); do
258253
echo "Process [${i}]: ${docker_compose_profiles[$i]}, ${docker_llm_container_names[${i}]}"
254+
255+
echo "::group::start_services"
259256
start_services "${docker_compose_profiles[${i}]}" "${docker_llm_container_names[${i}]}"
257+
echo "::endgroup::"
260258
docker ps -a
261259

260+
echo "::group::validate_microservices"
262261
validate_microservices "${docker_llm_container_names[${i}]}"
262+
echo "::endgroup::"
263+
264+
echo "::group::validate_megaservice"
263265
validate_megaservice
266+
echo "::endgroup::"
267+
268+
echo "::group::validate_gradio"
264269
validate_gradio
270+
echo "::endgroup::"
265271

266272
stop_docker "${docker_compose_profiles[${i}]}"
267273
sleep 5s
268274
done
269275

270-
echo y | docker system prune
276+
docker system prune -f
271277
}
272278

273279
main

0 commit comments

Comments (0)