
Commit d2d7793

Merge branch 'main' into searchqna_fix

2 parents 7d281a5 + 9f80a18

21 files changed: +332 −177 lines

.github/workflows/pr-link-path-scan.yml

Lines changed: 14 additions & 10 deletions

```diff
@@ -36,16 +36,20 @@ jobs:
             # echo $url_line
             url=$(echo "$url_line"|cut -d '(' -f2 | cut -d ')' -f1|sed 's/\.git$//')
             path=$(echo "$url_line"|cut -d':' -f1 | cut -d'/' -f2-)
-            sleep $delay
-            response=$(curl -L -s -o /dev/null -w "%{http_code}" "$url")|| true
-            if [ "$response" -ne 200 ]; then
-              echo "**********Validation failed ($response), try again**********"
-              response_retry=$(curl -s -o /dev/null -w "%{http_code}" "$url")
-              if [ "$response_retry" -eq 200 ]; then
-                echo "*****Retry successfully*****"
-              else
-                echo "Invalid link ($response_retry) from ${{github.workspace}}/$path: $url"
-                fail="TRUE"
+            if [[ "$url" == "https://platform.openai.com/api-keys"* ]]; then
+              echo "Link "$url" from ${{github.workspace}}/$path needs to be verified by a real person."
+            else
+              sleep $delay
+              response=$(curl -L -s -o /dev/null -w "%{http_code}" "$url")|| true
+              if [ "$response" -ne 200 ]; then
+                echo "**********Validation failed ($response), try again**********"
+                response_retry=$(curl -s -o /dev/null -w "%{http_code}" "$url")
+                if [ "$response_retry" -eq 200 ]; then
+                  echo "*****Retry successfully*****"
+                else
+                  echo "Invalid link ($response_retry) from ${{github.workspace}}/$path: $url"
+                  fail="TRUE"
+                fi
               fi
             fi
           done
```
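
For readers skimming the change: the new branch skips automated validation for the OpenAI API-keys page (which sits behind a login and always fails the check) and keeps the existing check-then-retry path for every other link. A minimal standalone sketch of the same logic, assuming a hypothetical `urls.txt` input file with one URL per line and an illustrative fixed 2-second delay:

```bash
#!/usr/bin/env bash
fail="FALSE"
while read -r url; do
    # Links behind a login wall cannot be validated automatically,
    # so flag them for manual review instead of failing the run.
    if [[ "$url" == "https://platform.openai.com/api-keys"* ]]; then
        echo "Link $url needs to be verified by a real person."
        continue
    fi
    sleep 2
    response=$(curl -L -s -o /dev/null -w "%{http_code}" "$url") || true
    if [ "$response" -ne 200 ]; then
        # One retry before declaring the link dead.
        response_retry=$(curl -s -o /dev/null -w "%{http_code}" "$url")
        [ "$response_retry" -eq 200 ] || { echo "Invalid link ($response_retry): $url"; fail="TRUE"; }
    fi
done < urls.txt
[ "$fail" = "TRUE" ] && exit 1 || exit 0
```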

AgentQnA/README.md

Lines changed: 53 additions & 12 deletions

````diff
@@ -99,7 +99,7 @@ flowchart LR
 
 #### First, clone the `GenAIExamples` repo.
 
-```
+```bash
 export WORKDIR=<your-work-directory>
 cd $WORKDIR
 git clone https://github.com/opea-project/GenAIExamples.git
@@ -109,7 +109,7 @@ git clone https://github.com/opea-project/GenAIExamples.git
 
 ##### For proxy environments only
 
-```
+```bash
 export http_proxy="Your_HTTP_Proxy"
 export https_proxy="Your_HTTPs_Proxy"
 # Example: no_proxy="localhost, 127.0.0.1, 192.168.1.1"
@@ -118,31 +118,43 @@ export no_proxy="Your_No_Proxy"
 
 ##### For using open-source llms
 
-```
+Set up a [HuggingFace](https://huggingface.co/) account and generate a [user access token](https://huggingface.co/docs/transformers.js/en/guides/private#step-1-generating-a-user-access-token).
+
+Then set an environment variable with the token and another for a directory to download the models:
+
+```bash
 export HUGGINGFACEHUB_API_TOKEN=<your-HF-token>
-export HF_CACHE_DIR=<directory-where-llms-are-downloaded> #so that no need to redownload every time
+export HF_CACHE_DIR=<directory-where-llms-are-downloaded> # to avoid redownloading models
 ```
 
-##### [Optional] OPANAI_API_KEY to use OpenAI models
+##### [Optional] OPENAI_API_KEY to use OpenAI models or Intel® AI for Enterprise Inference
 
-```
+To use OpenAI models, generate a key following these [instructions](https://platform.openai.com/api-keys).
+
+To use a remote server running Intel® AI for Enterprise Inference, contact the cloud service provider or owner of the on-prem machine for a key to access the desired model on the server.
+
+Then set the environment variable `OPENAI_API_KEY` with the key contents:
+
+```bash
 export OPENAI_API_KEY=<your-openai-key>
 ```
 
 #### Third, set up environment variables for the selected hardware using the corresponding `set_env.sh`
 
 ##### Gaudi
 
-```
+```bash
 source $WORKDIR/GenAIExamples/AgentQnA/docker_compose/intel/hpu/gaudi/set_env.sh
 ```
 
 ##### Xeon
 
-```
+```bash
 source $WORKDIR/GenAIExamples/AgentQnA/docker_compose/intel/cpu/xeon/set_env.sh
 ```
 
+For running
+
 ### 2. Launch the multi-agent system. </br>
 
 We make it convenient to launch the whole system with docker compose, which includes microservices for LLM, agents, UI, retrieval tool, vector database, dataprep, and telemetry. There are 3 docker compose files, which make it easy for users to pick and choose. Users can choose a different retrieval tool other than the `DocIndexRetriever` example provided in our GenAIExamples repo. Users can choose not to launch the telemetry containers.
@@ -184,14 +196,37 @@ docker compose -f $WORKDIR/GenAIExamples/DocIndexRetriever/docker_compose/intel/
 
 #### Launch on Xeon
 
-On Xeon, only OpenAI models are supported. The command below will launch the multi-agent system with the `DocIndexRetriever` as the retrieval tool for the Worker RAG agent.
+On Xeon, OpenAI models and models deployed on a remote server are supported. Both methods require an API key.
 
 ```bash
 export OPENAI_API_KEY=<your-openai-key>
 cd $WORKDIR/GenAIExamples/AgentQnA/docker_compose/intel/cpu/xeon
+```
+
+##### OpenAI Models
+
+The command below will launch the multi-agent system with the `DocIndexRetriever` as the retrieval tool for the Worker RAG agent.
+
+```bash
 docker compose -f $WORKDIR/GenAIExamples/DocIndexRetriever/docker_compose/intel/cpu/xeon/compose.yaml -f compose_openai.yaml up -d
 ```
 
+##### Models on Remote Server
+
+When models are deployed on a remote server with Intel® AI for Enterprise Inference, a base URL and an API key are required to access them. To run the Agent microservice on Xeon while using models deployed on a remote server, add `compose_remote.yaml` to the `docker compose` command and set additional environment variables.
+
+###### Notes
+
+- `OPENAI_API_KEY` is already set in a previous step.
+- `model` is used to overwrite the value set for this environment variable in `set_env.sh`.
+- `LLM_ENDPOINT_URL` is the base URL given from the owner of the on-prem machine or cloud service provider. It will follow this format: "https://<DNS>". Here is an example: "https://api.inference.example.com".
+
+```bash
+export model=<name-of-model-card>
+export LLM_ENDPOINT_URL=<http-endpoint-of-remote-server>
+docker compose -f $WORKDIR/GenAIExamples/DocIndexRetriever/docker_compose/intel/cpu/xeon/compose.yaml -f compose_openai.yaml -f compose_remote.yaml up -d
+```
+
 ### 3. Ingest Data into the vector database
 
 The `run_ingest_data.sh` script will use an example jsonl file to ingest example documents into a vector database. Other ways to ingest data and other types of documents supported can be found in the OPEA dataprep microservice located in the opea-project/GenAIComps repo.
@@ -208,12 +243,18 @@ bash run_ingest_data.sh
 The UI microservice is launched in the previous step with the other microservices.
 To see the UI, open a web browser to `http://${ip_address}:5173` to access the UI. Note the `ip_address` here is the host IP of the UI microservice.
 
-1. `create Admin Account` with a random value
-2. add opea agent endpoint `http://$ip_address:9090/v1` which is a openai compatible api
+1. Click on the arrow above `Get started`. Create an admin account with a name, email, and password.
+2. Add an OpenAI-compatible API endpoint. In the upper right, click on the circle button with the user's initial, go to `Admin Settings`->`Connections`. Under `Manage OpenAI API Connections`, click on the `+` to add a connection. Fill in these fields:
+
+   - **URL**: `http://${ip_address}:9090/v1`, do not forget the `v1`
+   - **Key**: any value
+   - **Model IDs**: any name i.e. `opea-agent`, then press `+` to add it
+
+   Click "Save".
 
 ![opea-agent-setting](assets/img/opea-agent-setting.png)
 
-3. test opea agent with ui
+3. Test OPEA agent with UI. Return to `New Chat` and ensure the model (i.e. `opea-agent`) is selected near the upper left. Enter in any prompt to interact with the agent.
 
 ![opea-agent-test](assets/img/opea-agent-test.png)
````
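
Since the endpoint registered in step 2 of the new UI instructions is OpenAI-compatible, a quick smoke test is possible from the command line before (or instead of) configuring the UI. A hedged sketch, assuming the agent answers the standard `/v1/chat/completions` route; the model name `opea-agent` and the prompt are placeholders:

```bash
curl http://${ip_address}:9090/v1/chat/completions \
  -X POST \
  -H "Content-Type: application/json" \
  -d '{"model": "opea-agent", "messages": [{"role": "user", "content": "What is OPEA?"}]}'
```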

AgentQnA/docker_compose/intel/cpu/xeon/compose_remote.yaml

Lines changed: 18 additions & 0 deletions

```diff
@@ -0,0 +1,18 @@
+# Copyright (C) 2025 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+services:
+  worker-rag-agent:
+    environment:
+      llm_endpoint_url: ${LLM_ENDPOINT_URL}
+      api_key: ${OPENAI_API_KEY}
+
+  worker-sql-agent:
+    environment:
+      llm_endpoint_url: ${LLM_ENDPOINT_URL}
+      api_key: ${OPENAI_API_KEY}
+
+  supervisor-react-agent:
+    environment:
+      llm_endpoint_url: ${LLM_ENDPOINT_URL}
+      api_key: ${OPENAI_API_KEY}
```
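
This new file is a Compose override: passed as an additional `-f` file, its `environment` entries are merged into the three agent services defined in the earlier files. One way to sanity-check the merge without starting containers is `docker compose config`, which prints the fully resolved configuration. A sketch, assuming the environment variables from the README steps above are already exported:

```bash
cd $WORKDIR/GenAIExamples/AgentQnA/docker_compose/intel/cpu/xeon
# Print the merged configuration; the agent services should show
# llm_endpoint_url and api_key resolved from LLM_ENDPOINT_URL and OPENAI_API_KEY.
docker compose -f $WORKDIR/GenAIExamples/DocIndexRetriever/docker_compose/intel/cpu/xeon/compose.yaml \
  -f compose_openai.yaml -f compose_remote.yaml config
```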

CodeTrans/Dockerfile

Lines changed: 2 additions & 1 deletion

```diff
@@ -1,8 +1,9 @@
 # Copyright (C) 2024 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0
 
+ARG IMAGE_REPO=opea
 ARG BASE_TAG=latest
-FROM opea/comps-base:$BASE_TAG
+FROM $IMAGE_REPO/comps-base:$BASE_TAG
 
 COPY ./code_translation.py $HOME/code_translation.py
```
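The new `IMAGE_REPO` build argument lets the base image come from a registry other than `opea` without editing the Dockerfile. An illustrative invocation; the registry name and tag are placeholders:

```bash
cd CodeTrans
# Default build, identical to the old behavior (opea/comps-base:latest):
docker build -t codetrans .
# Pull the base image from a different registry at a pinned tag instead:
docker build --build-arg IMAGE_REPO=myregistry.example.com --build-arg BASE_TAG=v1.2 -t codetrans .
```
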
CodeTrans/docker_image_build/build.yaml

Lines changed: 3 additions & 0 deletions

```diff
@@ -5,6 +5,8 @@ services:
   codetrans:
     build:
       args:
+        IMAGE_REPO: ${REGISTRY:-opea}
+        BASE_TAG: ${TAG:-latest}
         http_proxy: ${http_proxy}
         https_proxy: ${https_proxy}
         no_proxy: ${no_proxy}
@@ -45,4 +47,5 @@ services:
     build:
       context: GenAIComps
       dockerfile: comps/third_parties/vllm/src/Dockerfile.amd_gpu
+    extends: codetrans
     image: ${REGISTRY:-opea}/vllm-rocm:${TAG:-latest}
```
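
`${REGISTRY:-opea}` and `${TAG:-latest}` use Compose's default-value substitution: the environment variable is used when set, otherwise the fallback after `:-` applies. The new build args can therefore be steered from the shell; the values below are illustrative:

```bash
cd CodeTrans/docker_image_build
# Falls back to IMAGE_REPO=opea and BASE_TAG=latest:
docker compose -f build.yaml build codetrans
# Overrides both through the environment:
REGISTRY=myregistry.example.com TAG=v1.2 docker compose -f build.yaml build codetrans
```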

CodeTrans/tests/test_compose_on_gaudi.sh

Lines changed: 24 additions & 11 deletions

```diff
@@ -17,19 +17,14 @@ ip_address=$(hostname -I | awk '{print $1}')
 
 function build_docker_images() {
     opea_branch=${opea_branch:-"main"}
-    # If the opea_branch isn't main, replace the git clone branch in Dockerfile.
-    if [[ "${opea_branch}" != "main" ]]; then
-        cd $WORKPATH
-        OLD_STRING="RUN git clone --depth 1 https://github.com/opea-project/GenAIComps.git"
-        NEW_STRING="RUN git clone --depth 1 --branch ${opea_branch} https://github.com/opea-project/GenAIComps.git"
-        find . -type f -name "Dockerfile*" | while read -r file; do
-            echo "Processing file: $file"
-            sed -i "s|$OLD_STRING|$NEW_STRING|g" "$file"
-        done
-    fi
 
     cd $WORKPATH/docker_image_build
     git clone --depth 1 --branch ${opea_branch} https://github.com/opea-project/GenAIComps.git
+    pushd GenAIComps
+    echo "GenAIComps test commit is $(git rev-parse HEAD)"
+    docker build --no-cache -t ${REGISTRY}/comps-base:${TAG} --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile .
+    popd && sleep 1s
+
     git clone https://github.com/HabanaAI/vllm-fork.git && cd vllm-fork
     VLLM_FORK_VER=v0.6.6.post1+Gaudi-1.20.0
     git checkout ${VLLM_FORK_VER} &> /dev/null && cd ../
@@ -160,17 +155,35 @@ function stop_docker() {
 
 function main() {
 
+    echo "::group::stop_docker"
     stop_docker
+    echo "::endgroup::"
 
+    echo "::group::build_docker_images"
     if [[ "$IMAGE_REPO" == "opea" ]]; then build_docker_images; fi
+    echo "::endgroup::"
+
+    echo "::group::start_services"
     start_services
+    echo "::endgroup::"
 
+    echo "::group::validate_microservices"
     validate_microservices
+    echo "::endgroup::"
+
+    echo "::group::validate_megaservice"
     validate_megaservice
+    echo "::endgroup::"
+
+    echo "::group::validate_frontend"
     validate_frontend
+    echo "::endgroup::"
 
+    echo "::group::stop_docker"
     stop_docker
-    echo y | docker system prune
+    echo "::endgroup::"
+
+    docker system prune -f
 
 }
 
```
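The `::group::`/`::endgroup::` pairs added here (and in the two scripts below) are GitHub Actions workflow commands: everything printed between them is folded into one collapsible section of the job log, which keeps long test output navigable. A minimal sketch of the pattern, with hypothetical stand-in functions; run locally, the markers print as plain text and are harmless:

```bash
#!/usr/bin/env bash
# Fold a phase's output into a collapsible section of the Actions log.
# The phase name doubles as the command to run.
run_step() {
    echo "::group::$1"
    "$@"
    echo "::endgroup::"
}

# Hypothetical stand-ins for the real test phases.
stop_docker()    { echo "stopping containers..."; }
start_services() { echo "starting services..."; }

run_step stop_docker
run_step start_services
```
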
CodeTrans/tests/test_compose_on_rocm.sh

Lines changed: 23 additions & 11 deletions

```diff
@@ -18,19 +18,13 @@ ip_address=$(hostname -I | awk '{print $1}')
 
 function build_docker_images() {
     opea_branch=${opea_branch:-"main"}
-    # If the opea_branch isn't main, replace the git clone branch in Dockerfile.
-    if [[ "${opea_branch}" != "main" ]]; then
-        cd $WORKPATH
-        OLD_STRING="RUN git clone --depth 1 https://github.com/opea-project/GenAIComps.git"
-        NEW_STRING="RUN git clone --depth 1 --branch ${opea_branch} https://github.com/opea-project/GenAIComps.git"
-        find . -type f -name "Dockerfile*" | while read -r file; do
-            echo "Processing file: $file"
-            sed -i "s|$OLD_STRING|$NEW_STRING|g" "$file"
-        done
-    fi
 
     cd $WORKPATH/docker_image_build
     git clone --depth 1 --branch ${opea_branch} https://github.com/opea-project/GenAIComps.git
+    pushd GenAIComps
+    echo "GenAIComps test commit is $(git rev-parse HEAD)"
+    docker build --no-cache -t ${REGISTRY}/comps-base:${TAG} --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile .
+    popd && sleep 1s
 
     echo "Build all the images with --no-cache, check docker_image_build.log for details..."
     service_list="codetrans codetrans-ui llm-textgen nginx"
@@ -161,17 +155,35 @@ function stop_docker() {
 
 function main() {
 
+    echo "::group::stop_docker"
     stop_docker
+    echo "::endgroup::"
 
+    echo "::group::build_docker_images"
     if [[ "$IMAGE_REPO" == "opea" ]]; then build_docker_images; fi
+    echo "::endgroup::"
+
+    echo "::group::start_services"
     start_services
+    echo "::endgroup::"
 
+    echo "::group::validate_microservices"
     validate_microservices
+    echo "::endgroup::"
+
+    echo "::group::validate_megaservice"
     validate_megaservice
+    echo "::endgroup::"
+
+    echo "::group::validate_frontend"
     validate_frontend
+    echo "::endgroup::"
 
+    echo "::group::stop_docker"
     stop_docker
-    echo y | docker system prune
+    echo "::endgroup::"
+
+    docker system prune -f
 
 }
 
```

CodeTrans/tests/test_compose_on_xeon.sh

Lines changed: 24 additions & 11 deletions

```diff
@@ -17,19 +17,14 @@ ip_address=$(hostname -I | awk '{print $1}')
 
 function build_docker_images() {
     opea_branch=${opea_branch:-"main"}
-    # If the opea_branch isn't main, replace the git clone branch in Dockerfile.
-    if [[ "${opea_branch}" != "main" ]]; then
-        cd $WORKPATH
-        OLD_STRING="RUN git clone --depth 1 https://github.com/opea-project/GenAIComps.git"
-        NEW_STRING="RUN git clone --depth 1 --branch ${opea_branch} https://github.com/opea-project/GenAIComps.git"
-        find . -type f -name "Dockerfile*" | while read -r file; do
-            echo "Processing file: $file"
-            sed -i "s|$OLD_STRING|$NEW_STRING|g" "$file"
-        done
-    fi
 
     cd $WORKPATH/docker_image_build
     git clone --depth 1 --branch ${opea_branch} https://github.com/opea-project/GenAIComps.git
+    pushd GenAIComps
+    echo "GenAIComps test commit is $(git rev-parse HEAD)"
+    docker build --no-cache -t ${REGISTRY}/comps-base:${TAG} --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile .
+    popd && sleep 1s
+
     git clone https://github.com/vllm-project/vllm.git && cd vllm
     VLLM_VER="v0.8.3"
     echo "Check out vLLM tag ${VLLM_VER}"
@@ -163,17 +158,35 @@ function stop_docker() {
 
 function main() {
 
+    echo "::group::stop_docker"
     stop_docker
+    echo "::endgroup::"
 
+    echo "::group::build_docker_images"
     if [[ "$IMAGE_REPO" == "opea" ]]; then build_docker_images; fi
+    echo "::endgroup::"
+
+    echo "::group::start_services"
     start_services
+    echo "::endgroup::"
 
+    echo "::group::validate_microservices"
     validate_microservices
+    echo "::endgroup::"
+
+    echo "::group::validate_megaservice"
     validate_megaservice
+    echo "::endgroup::"
+
+    echo "::group::validate_frontend"
     validate_frontend
+    echo "::endgroup::"
 
+    echo "::group::stop_docker"
     stop_docker
-    echo y | docker system prune
+    echo "::endgroup::"
+
+    docker system prune -f
 
 }
 
```