
Commit ca15fe9

Refactor lvm related examples (#1333)
1 parent f48bd8e commit ca15fe9

25 files changed: +161 −154 lines


MultimodalQnA/docker_compose/amd/gpu/rocm/README.md

Lines changed: 3 additions & 3 deletions
````diff
@@ -39,7 +39,7 @@ docker build --no-cache -t opea/embedding:latest --build-arg https_proxy=$https_
 Build lvm-llava image
 
 ```bash
-docker build --no-cache -t opea/lvm-llava:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/lvms/llava/dependency/Dockerfile .
+docker build --no-cache -t opea/lvm-llava:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/lvms/src/integrations/dependency/llava/Dockerfile .
 ```
 
 ### 3. Build retriever-multimodal-redis Image
@@ -85,7 +85,7 @@ Then run the command `docker images`, you will have the following 8 Docker Image
 
 1. `opea/dataprep-multimodal-redis:latest`
 2. `ghcr.io/huggingface/text-generation-inference:2.4.1-rocm`
-3. `opea/lvm-tgi:latest`
+3. `opea/lvm:latest`
 4. `opea/retriever-multimodal-redis:latest`
 5. `opea/embedding:latest`
 6. `opea/embedding-multimodal-bridgetower:latest`
@@ -193,7 +193,7 @@ curl http://${host_ip}:${LLAVA_SERVER_PORT}/generate \
     -d '{"prompt":"Describe the image please.", "img_b64_str": "iVBORw0KGgoAAAANSUhEUgAAAAoAAAAKCAYAAACNMs+9AAAAFUlEQVR42mP8/5+hnoEIwDiqkL4KAcT9GO0U4BxoAAAAAElFTkSuQmCC"}'
 ```
 
-5. lvm-llava-svc
+5. lvm
 
 ```bash
 curl http://${host_ip}:9399/v1/lvm \
````
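The last hunk truncates the sample request at the hunk boundary. A minimal sketch of the full call, borrowing the payload shape from this repo's test scripts (the `-X POST` and header flags are assumptions, since the diff cuts the original command short):

```bash
# Smoke-test the renamed lvm microservice (formerly lvm-tgi).
# Port 9399 and the /v1/lvm route are unchanged by this refactor.
curl http://${host_ip}:9399/v1/lvm \
    -X POST \
    -H 'Content-Type: application/json' \
    -d '{"retrieved_docs": [], "initial_query": "What is this?", "top_n": 1, "metadata": [], "chat_template": "The caption of the image is: '\''{context}'\''. {question}"}'
```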

MultimodalQnA/docker_compose/amd/gpu/rocm/compose.yaml

Lines changed: 6 additions & 5 deletions
````diff
@@ -24,7 +24,7 @@ services:
     container_name: dataprep-multimodal-redis
     depends_on:
       - redis-vector-db
-      - lvm-tgi
+      - lvm
     ports:
       - "6007:6007"
     environment:
@@ -116,9 +116,9 @@ services:
     ipc: host
     command: --model-id ${LVM_MODEL_ID} --max-input-tokens 3048 --max-total-tokens 4096
     restart: unless-stopped
-  lvm-tgi:
-    image: ${REGISTRY:-opea}/lvm-tgi:${TAG:-latest}
-    container_name: lvm-tgi
+  lvm:
+    image: ${REGISTRY:-opea}/lvm:${TAG:-latest}
+    container_name: lvm
     depends_on:
       - tgi-rocm
     ports:
@@ -128,6 +128,7 @@ services:
       no_proxy: ${no_proxy}
       http_proxy: ${http_proxy}
       https_proxy: ${https_proxy}
+      LVM_COMPONENT_NAME: "OPEA_TGI_LLAVA_LVM"
       LVM_ENDPOINT: ${LVM_ENDPOINT}
       HF_HUB_DISABLE_PROGRESS_BARS: 1
       HF_HUB_ENABLE_HF_TRANSFER: 0
@@ -140,7 +141,7 @@ services:
       - dataprep-multimodal-redis
       - embedding
       - retriever-redis
-      - lvm-tgi
+      - lvm
     ports:
       - "8888:8888"
     environment:
````
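Because only the service name, image, and one environment variable change, an existing deployment can be refreshed in place. A minimal sketch, assuming the compose file above is the active one:

```bash
# Recreate only the renamed service; compose starts `lvm` in place of
# the removed `lvm-tgi` definition.
docker compose -f compose.yaml up -d lvm
# The new LVM_COMPONENT_NAME variable selects the TGI-LLaVA backend
# inside the unified lvm image; check the logs to confirm startup.
docker logs lvm
```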

MultimodalQnA/docker_compose/intel/cpu/xeon/README.md

Lines changed: 6 additions & 6 deletions
````diff
@@ -36,7 +36,7 @@ lvm-llava
 ================
 Port 8399 - Open to 0.0.0.0/0
 
-lvm-llava-svc
+lvm
 ===
 Port 9399 - Open to 0.0.0.0/0
 
@@ -132,13 +132,13 @@ docker build --no-cache -t opea/retriever-redis:latest --build-arg https_proxy=$
 Build lvm-llava image
 
 ```bash
-docker build --no-cache -t opea/lvm-llava:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/lvms/llava/dependency/Dockerfile .
+docker build --no-cache -t opea/lvm-llava:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/lvms/src/integrations/dependency/llava/Dockerfile .
 ```
 
-Build lvm-llava-svc microservice image
+Build lvm microservice image
 
 ```bash
-docker build --no-cache -t opea/lvm-llava-svc:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/lvms/llava/Dockerfile .
+docker build --no-cache -t opea/lvm:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/lvms/src/Dockerfile .
 ```
 
 ### 4. Build dataprep-multimodal-redis Image
@@ -179,7 +179,7 @@ cd ../../../
 Then run the command `docker images`, you will have the following 11 Docker Images:
 
 1. `opea/dataprep-multimodal-redis:latest`
-2. `opea/lvm-llava-svc:latest`
+2. `opea/lvm:latest`
 3. `opea/lvm-llava:latest`
 4. `opea/retriever-multimodal-redis:latest`
 5. `opea/whisper:latest`
@@ -271,7 +271,7 @@ curl http://${host_ip}:${LLAVA_SERVER_PORT}/generate \
     -d '{"prompt":"Describe the image please.", "img_b64_str": "iVBORw0KGgoAAAANSUhEUgAAAAoAAAAKCAYAAACNMs+9AAAAFUlEQVR42mP8/5+hnoEIwDiqkL4KAcT9GO0U4BxoAAAAAElFTkSuQmCC"}'
 ```
 
-6. lvm-llava-svc
+6. lvm
 
 ```bash
 curl http://${host_ip}:9399/v1/lvm \
````
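As a quick sanity check after rebuilding on Xeon (a hypothetical filter, not part of the original README), the rename can be confirmed from the local image list:

```bash
# Expect both opea/lvm (the unified microservice) and opea/lvm-llava
# (the LLaVA dependency server); opea/lvm-llava-svc is no longer built.
docker images | grep 'opea/lvm'
```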

MultimodalQnA/docker_compose/intel/cpu/xeon/compose.yaml

Lines changed: 5 additions & 4 deletions
````diff
@@ -100,9 +100,9 @@ services:
       https_proxy: ${https_proxy}
     entrypoint: ["python", "llava_server.py", "--device", "cpu", "--model_name_or_path", $LVM_MODEL_ID]
     restart: unless-stopped
-  lvm-llava-svc:
-    image: ${REGISTRY:-opea}/lvm-llava-svc:${TAG:-latest}
-    container_name: lvm-llava-svc
+  lvm:
+    image: ${REGISTRY:-opea}/lvm:${TAG:-latest}
+    container_name: lvm
     depends_on:
       - lvm-llava
     ports:
@@ -112,6 +112,7 @@ services:
       no_proxy: ${no_proxy}
       http_proxy: ${http_proxy}
       https_proxy: ${https_proxy}
+      LVM_COMPONENT_NAME: "OPEA_LLAVA_LVM"
       LVM_ENDPOINT: ${LVM_ENDPOINT}
     restart: unless-stopped
   multimodalqna:
@@ -122,7 +123,7 @@ services:
       - dataprep-multimodal-redis
       - embedding
       - retriever-redis
-      - lvm-llava-svc
+      - lvm
     ports:
       - "8888:8888"
     environment:
````

MultimodalQnA/docker_compose/intel/hpu/gaudi/README.md

Lines changed: 5 additions & 5 deletions
````diff
@@ -86,10 +86,10 @@ Build TGI Gaudi image
 docker pull ghcr.io/huggingface/tgi-gaudi:2.0.6
 ```
 
-Build lvm-tgi microservice image
+Build lvm microservice image
 
 ```bash
-docker build --no-cache -t opea/lvm-tgi:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/lvms/tgi-llava/Dockerfile .
+docker build --no-cache -t opea/lvm:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/lvms/src/Dockerfile .
 ```
 
 ### 4. Build dataprep-multimodal-redis Image
@@ -128,7 +128,7 @@ docker build --no-cache -t opea/multimodalqna-ui:latest --build-arg https_proxy=
 Then run the command `docker images`, you will have the following 11 Docker Images:
 
 1. `opea/dataprep-multimodal-redis:latest`
-2. `opea/lvm-tgi:latest`
+2. `opea/lvm:latest`
 3. `ghcr.io/huggingface/tgi-gaudi:2.0.6`
 4. `opea/retriever-multimodal-redis:latest`
 5. `opea/whisper:latest`
@@ -220,7 +220,7 @@ curl http://${host_ip}:${LLAVA_SERVER_PORT}/generate \
     -H 'Content-Type: application/json'
 ```
 
-6. lvm-tgi
+6. lvm
 
 ```bash
 curl http://${host_ip}:9399/v1/lvm \
@@ -274,7 +274,7 @@ curl --silent --write-out "HTTPSTATUS:%{http_code}" \
     -F "files=@./${audio_fn}"
 ```
 
-Also, test dataprep microservice with generating an image caption using lvm-tgi
+Also, test dataprep microservice with generating an image caption using lvm
 
 ```bash
 curl --silent --write-out "HTTPSTATUS:%{http_code}" \
````

MultimodalQnA/docker_compose/intel/hpu/gaudi/compose.yaml

Lines changed: 6 additions & 5 deletions
````diff
@@ -24,7 +24,7 @@ services:
     container_name: dataprep-multimodal-redis
     depends_on:
       - redis-vector-db
-      - lvm-tgi
+      - lvm
     ports:
       - "6007:6007"
     environment:
@@ -115,9 +115,9 @@ services:
     ipc: host
     command: --model-id ${LVM_MODEL_ID} --max-input-tokens 3048 --max-total-tokens 4096
     restart: unless-stopped
-  lvm-tgi:
-    image: ${REGISTRY:-opea}/lvm-tgi:${TAG:-latest}
-    container_name: lvm-tgi
+  lvm:
+    image: ${REGISTRY:-opea}/lvm:${TAG:-latest}
+    container_name: lvm
     depends_on:
       - tgi-gaudi
     ports:
@@ -127,6 +127,7 @@ services:
       no_proxy: ${no_proxy}
       http_proxy: ${http_proxy}
       https_proxy: ${https_proxy}
+      LVM_COMPONENT_NAME: "OPEA_TGI_LLAVA_LVM"
       LVM_ENDPOINT: ${LVM_ENDPOINT}
       HF_HUB_DISABLE_PROGRESS_BARS: 1
       HF_HUB_ENABLE_HF_TRANSFER: 0
@@ -139,7 +140,7 @@ services:
       - dataprep-multimodal-redis
       - embedding
       - retriever-redis
-      - lvm-tgi
+      - lvm
     ports:
       - "8888:8888"
     environment:
````

MultimodalQnA/docker_image_build/build.yaml

Lines changed: 4 additions & 10 deletions
````diff
@@ -38,21 +38,15 @@ services:
   lvm-llava:
     build:
       context: GenAIComps
-      dockerfile: comps/lvms/llava/dependency/Dockerfile
+      dockerfile: comps/lvms/src/integrations/dependency/llava/Dockerfile
     extends: multimodalqna
     image: ${REGISTRY:-opea}/lvm-llava:${TAG:-latest}
-  lvm-llava-svc:
+  lvm:
     build:
       context: GenAIComps
-      dockerfile: comps/lvms/llava/Dockerfile
+      dockerfile: comps/lvms/src/Dockerfile
     extends: multimodalqna
-    image: ${REGISTRY:-opea}/lvm-llava-svc:${TAG:-latest}
-  lvm-tgi:
-    build:
-      context: GenAIComps
-      dockerfile: comps/lvms/tgi-llava/Dockerfile
-    extends: multimodalqna
-    image: ${REGISTRY:-opea}/lvm-tgi:${TAG:-latest}
+    image: ${REGISTRY:-opea}/lvm:${TAG:-latest}
   dataprep-multimodal-redis:
     build:
       context: GenAIComps
````
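With the two wrapper targets (`lvm-llava-svc`, `lvm-tgi`) collapsed into one, the unified image can be rebuilt on its own; this mirrors the invocation the test scripts below use:

```bash
# Build just the renamed lvm service image from build.yaml (replaces
# both of the retired lvm-llava-svc and lvm-tgi build targets).
docker compose -f build.yaml build lvm --no-cache
```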

MultimodalQnA/tests/test_compose_on_gaudi.sh

Lines changed: 4 additions & 4 deletions
````diff
@@ -22,7 +22,7 @@ function build_docker_images() {
     cd $WORKPATH/docker_image_build
     git clone https://github.com/opea-project/GenAIComps.git && cd GenAIComps && git checkout "${opea_branch:-"main"}" && cd ../
     echo "Build all the images with --no-cache, check docker_image_build.log for details..."
-    service_list="multimodalqna multimodalqna-ui embedding-multimodal-bridgetower embedding retriever-redis lvm-tgi dataprep-multimodal-redis whisper"
+    service_list="multimodalqna multimodalqna-ui embedding-multimodal-bridgetower embedding retriever-redis lvm dataprep-multimodal-redis whisper"
     docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log
 
     docker pull ghcr.io/huggingface/tgi-gaudi:2.0.6
@@ -214,12 +214,12 @@ function validate_microservices() {
         '{"inputs":"![](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/rabbit.png)What is this a picture of?\n\n","parameters":{"max_new_tokens":16, "seed": 42}}'
 
     # lvm
-    echo "Evaluating lvm-tgi"
+    echo "Evaluating lvm"
     validate_service \
         "http://${host_ip}:9399/v1/lvm" \
         '"text":"' \
-        "lvm-tgi" \
-        "lvm-tgi" \
+        "lvm" \
+        "lvm" \
         '{"retrieved_docs": [], "initial_query": "What is this?", "top_n": 1, "metadata": [{"b64_img_str": "iVBORw0KGgoAAAANSUhEUgAAAAoAAAAKCAYAAACNMs+9AAAAFUlEQVR42mP8/5+hnoEIwDiqkL4KAcT9GO0U4BxoAAAAAElFTkSuQmCC", "transcript_for_inference": "yellow image", "video_id": "8c7461df-b373-4a00-8696-9a2234359fe0", "time_of_frame_ms":"37000000", "source_video":"WeAreGoingOnBullrun_8c7461df-b373-4a00-8696-9a2234359fe0.mp4"}], "chat_template":"The caption of the image is: '\''{context}'\''. {question}"}'
 
     # data prep requiring lvm
````

MultimodalQnA/tests/test_compose_on_rocm.sh

Lines changed: 4 additions & 4 deletions
````diff
@@ -23,7 +23,7 @@ function build_docker_images() {
     git clone https://github.com/opea-project/GenAIComps.git && cd GenAIComps && git checkout "${opea_branch:-"main"}" && cd ../
 
     echo "Build all the images with --no-cache, check docker_image_build.log for details..."
-    service_list="multimodalqna multimodalqna-ui embedding-multimodal-bridgetower embedding retriever-redis lvm-tgi lvm-llava-svc dataprep-multimodal-redis whisper"
+    service_list="multimodalqna multimodalqna-ui embedding-multimodal-bridgetower embedding retriever-redis lvm dataprep-multimodal-redis whisper"
     docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log
 
     docker images && sleep 1m
@@ -220,12 +220,12 @@ function validate_microservices() {
         '{"inputs":"![](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/rabbit.png)What is this a picture of?\n\n","parameters":{"max_new_tokens":16, "seed": 42}}'
 
     # lvm
-    echo "Evaluating lvm-llava-svc"
+    echo "Evaluating lvm"
     validate_service \
         "http://${host_ip}:9399/v1/lvm" \
         '"text":"' \
-        "lvm-tgi" \
-        "lvm-tgi" \
+        "lvm" \
+        "lvm" \
         '{"retrieved_docs": [], "initial_query": "What is this?", "top_n": 1, "metadata": [{"b64_img_str": "iVBORw0KGgoAAAANSUhEUgAAAAoAAAAKCAYAAACNMs+9AAAAFUlEQVR42mP8/5+hnoEIwDiqkL4KAcT9GO0U4BxoAAAAAElFTkSuQmCC", "transcript_for_inference": "yellow image", "video_id": "8c7461df-b373-4a00-8696-9a2234359fe0", "time_of_frame_ms":"37000000", "source_video":"WeAreGoingOnBullrun_8c7461df-b373-4a00-8696-9a2234359fe0.mp4"}], "chat_template":"The caption of the image is: '\''{context}'\''. {question}"}'
 
     # data prep requiring lvm
````

MultimodalQnA/tests/test_compose_on_xeon.sh

Lines changed: 4 additions & 4 deletions
````diff
@@ -22,7 +22,7 @@ function build_docker_images() {
     cd $WORKPATH/docker_image_build
     git clone https://github.com/opea-project/GenAIComps.git && cd GenAIComps && git checkout "${opea_branch:-"main"}" && cd ../
     echo "Build all the images with --no-cache, check docker_image_build.log for details..."
-    service_list="multimodalqna multimodalqna-ui embedding-multimodal-bridgetower embedding retriever-redis lvm-llava lvm-llava-svc dataprep-multimodal-redis whisper"
+    service_list="multimodalqna multimodalqna-ui embedding-multimodal-bridgetower embedding retriever-redis lvm-llava lvm dataprep-multimodal-redis whisper"
     docker compose -f build.yaml build ${service_list} --no-cache > ${LOG_PATH}/docker_image_build.log
 
     docker images && sleep 1m
@@ -212,12 +212,12 @@ function validate_microservices() {
         '{"prompt":"Describe the image please.", "img_b64_str": "iVBORw0KGgoAAAANSUhEUgAAAAoAAAAKCAYAAACNMs+9AAAAFUlEQVR42mP8/5+hnoEIwDiqkL4KAcT9GO0U4BxoAAAAAElFTkSuQmCC"}'
 
     # lvm
-    echo "Evaluating lvm-llava-svc"
+    echo "Evaluating lvm"
     validate_service \
         "http://${host_ip}:9399/v1/lvm" \
         '"text":"' \
-        "lvm-llava-svc" \
-        "lvm-llava-svc" \
+        "lvm" \
+        "lvm" \
         '{"retrieved_docs": [], "initial_query": "What is this?", "top_n": 1, "metadata": [{"b64_img_str": "iVBORw0KGgoAAAANSUhEUgAAAAoAAAAKCAYAAACNMs+9AAAAFUlEQVR42mP8/5+hnoEIwDiqkL4KAcT9GO0U4BxoAAAAAElFTkSuQmCC", "transcript_for_inference": "yellow image", "video_id": "8c7461df-b373-4a00-8696-9a2234359fe0", "time_of_frame_ms":"37000000", "source_video":"WeAreGoingOnBullrun_8c7461df-b373-4a00-8696-9a2234359fe0.mp4"}], "chat_template":"The caption of the image is: '\''{context}'\''. {question}"}'
 
     # data prep requiring lvm
````
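All three test scripts now build and validate the service under the single name `lvm`. A local run might look like this (the checkout location and the `host_ip` derivation are assumptions, not taken from the scripts):

```bash
# Run the updated Xeon test end to end; requires Docker on the host and
# a GenAIExamples checkout. The script clones GenAIComps itself.
export host_ip=$(hostname -I | awk '{print $1}')
bash MultimodalQnA/tests/test_compose_on_xeon.sh
```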

VideoQnA/docker_compose/intel/cpu/xeon/README.md

Lines changed: 11 additions & 6 deletions
````diff
@@ -71,10 +71,10 @@ docker build -t opea/reranking:latest --build-arg https_proxy=$https_proxy --bui
 ### 4. Build LVM Image (Xeon)
 
 ```bash
-docker build -t opea/video-llama-lvm-server:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/lvms/video-llama/dependency/Dockerfile .
+docker build -t opea/lvm-video-llama:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/lvms/src/integrations/dependency/video-llama/Dockerfile .
 
 # LVM Service Image
-docker build -t opea/lvm-video-llama:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/lvms/video-llama/Dockerfile .
+docker build -t opea/lvm:latest --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f comps/lvms/src/Dockerfile .
 ```
 
 ### 5. Build Dataprep Image
@@ -109,11 +109,16 @@ Then run the command `docker images`, you will have the following 8 Docker Image
 1. `opea/dataprep-multimodal-vdms:latest`
 2. `opea/embedding-multimodal-clip:latest`
 3. `opea/retriever-vdms:latest`
+<<<<<<< HEAD
 4. `opea/reranking:latest`
 5. `opea/video-llama-lvm-server:latest`
-6. `opea/lvm-video-llama:latest`
-7. `opea/videoqna:latest`
-8. `opea/videoqna-ui:latest`
+6. # `opea/lvm-video-llama:latest`
+7. `opea/reranking-tei:latest`
+8. `opea/lvm-video-llama:latest`
+9. `opea/lvm:latest`
+>>>>>>> d93597cbfd9da92b956adb3673c9e5d743c181af
+10. `opea/videoqna:latest`
+11. `opea/videoqna-ui:latest`
 
 ## 🚀 Start Microservices
 
@@ -275,7 +280,7 @@ docker compose up -d
 
 In first startup, this service will take times to download the LLM file. After it's finished, the service will be ready.
 
-Use `docker logs video-llama-lvm-server` to check if the download is finished.
+Use `docker logs lvm-video-llama` to check if the download is finished.
 
 ```bash
 curl -X POST \
````
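Since first startup blocks on the model download, a wait loop can gate follow-up requests; a sketch, assuming the dependency server logs a standard Uvicorn startup line (the matched string is a guess, not taken from the docs):

```bash
# Poll the renamed container until the server reports it is up; adjust
# the grep pattern to whatever the video-llama server actually logs.
until docker logs lvm-video-llama 2>&1 | grep -q "Uvicorn running"; do
  echo "still downloading model weights..."
  sleep 10
done
```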

VideoQnA/docker_compose/intel/cpu/xeon/compose.yaml

Lines changed: 5 additions & 4 deletions
````diff
@@ -75,8 +75,8 @@ services:
       DATAPREP_GET_VIDEO_LIST_ENDPOINT: ${DATAPREP_GET_VIDEO_LIST_ENDPOINT}
     restart: unless-stopped
   lvm-video-llama:
-    image: ${REGISTRY:-opea}/video-llama-lvm-server:${TAG:-latest}
-    container_name: video-llama-lvm-server
+    image: ${REGISTRY:-opea}/lvm-video-llama:${TAG:-latest}
+    container_name: lvm-video-llama
     ports:
       - "9009:9009"
     ipc: host
@@ -90,15 +90,16 @@ services:
       - video-llama-model:/home/user/model
     restart: unless-stopped
   lvm:
-    image: ${REGISTRY:-opea}/lvm-video-llama:${TAG:-latest}
-    container_name: lvm-video-llama
+    image: ${REGISTRY:-opea}/lvm:${TAG:-latest}
+    container_name: lvm
     ports:
       - "9000:9000"
     ipc: host
     environment:
       http_proxy: ${http_proxy}
       https_proxy: ${https_proxy}
       no_proxy: ${no_proxy}
+      LVM_COMPONENT_NAME: "OPEA_VIDEO_LLAMA_LVM"
       LVM_ENDPOINT: ${LVM_ENDPOINT}
     restart: unless-stopped
     depends_on:
````
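One way to confirm the wrapper picked up the new backend selector after recreating the stack (a hypothetical check, not part of the compose file):

```bash
# The unified lvm image chooses its backend from LVM_COMPONENT_NAME;
# here it should print OPEA_VIDEO_LLAMA_LVM and the configured endpoint.
docker exec lvm printenv LVM_COMPONENT_NAME LVM_ENDPOINT
```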
