Skip to content

Commit 41b11e3

Browse files
committed
update test-files
Signed-off-by: Mustafa <mustafa.cetin@intel.com>
1 parent 64cf295 commit 41b11e3

File tree

2 files changed: +8 additions, -8 deletions

CodeGen/tests/test_compose_on_gaudi.sh

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -60,7 +60,7 @@ function start_services() {
6060

6161
cd $WORKPATH/docker_compose/intel/hpu/gaudi
6262

63-
export LLM_MODEL_ID="Qwen/Qwen2.5-Coder-32B-Instruct"
63+
export LLM_MODEL_ID="Qwen/Qwen2.5-Coder-7B-Instruct"
6464
export LLM_ENDPOINT="http://${ip_address}:8028"
6565
export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
6666
export MEGA_SERVICE_PORT=7778
@@ -144,8 +144,8 @@ function validate_microservices() {
144144
"completion_tokens" \
145145
"llm-service" \
146146
"${llm_container_name}" \
147-
'{"model": "Qwen/Qwen2.5-Coder-32B-Instruct", "messages": [{"role": "user", "content": "def print_hello_world():"}], "max_tokens": 256}'
148-
147+
'{"model": "Qwen/Qwen2.5-Coder-7B-Instruct", "messages": [{"role": "user", "content": "def print_hello_world():"}], "max_tokens": 256}'
148+
149149
# llm microservice
150150
validate_services \
151151
"${ip_address}:9000/v1/chat/completions" \
@@ -176,7 +176,7 @@ function validate_megaservice() {
176176
# Curl the Mega Service with index_name and agents_flag
177177
validate_services \
178178
"${ip_address}:7778/v1/codegen" \
179-
"completion_tokens" \
179+
"" \
180180
"mega-codegen" \
181181
"codegen-gaudi-backend-server" \
182182
'{ "index_name": "test_redis", "agents_flag": "True", "messages": "def print_hello_world():", "max_tokens": 256}'

CodeGen/tests/test_compose_on_xeon.sh

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -62,7 +62,7 @@ function start_services() {
6262

6363
cd $WORKPATH/docker_compose/intel/cpu/xeon/
6464

65-
export LLM_MODEL_ID="Qwen/Qwen2.5-Coder-32B-Instruct"
65+
export LLM_MODEL_ID="Qwen/Qwen2.5-Coder-7B-Instruct"
6666
export LLM_ENDPOINT="http://${ip_address}:8028"
6767
export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN}
6868
export MEGA_SERVICE_PORT=7778
@@ -143,7 +143,7 @@ function validate_microservices() {
143143
"completion_tokens" \
144144
"llm-service" \
145145
"${llm_container_name}" \
146-
'{"model": "Qwen/Qwen2.5-Coder-32B-Instruct", "messages": [{"role": "user", "content": "What is Deep Learning?"}], "max_tokens": 256}'
146+
'{"model": "Qwen/Qwen2.5-Coder-7B-Instruct", "messages": [{"role": "user", "content": "What is Deep Learning?"}], "max_tokens": 256}'
147147

148148
# llm microservice
149149
validate_services \
@@ -175,7 +175,7 @@ function validate_megaservice() {
175175
# Curl the Mega Service with index_name and agents_flag
176176
validate_services \
177177
"${ip_address}:7778/v1/codegen" \
178-
"completion_tokens" \
178+
"" \
179179
"mega-codegen" \
180180
"codegen-xeon-backend-server" \
181181
'{ "index_name": "test_redis", "agents_flag": "True", "messages": "def print_hello_world():", "max_tokens": 256}'
@@ -255,7 +255,7 @@ function main() {
255255
sleep 5s
256256
done
257257

258-
echo y | docker system prune
258+
# echo y | docker system prune
259259
}
260260

261261
main

Comments (0)