
Commit b307cc8 (parent 912e435)

Fix langchain and huggingface version to avoid bug in FaqGen and DocSum, remove vllm hpu triton version fix (opea-project#1275)

* Fix langchain and huggingface version to avoid bug

Signed-off-by: Xinyao Wang <xinyao.wang@intel.com>

10 files changed (+6 lines, -14 lines)
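The change is twofold: the DocSum and FaqGen microservices get hard version pins for huggingface_hub, langchain, and langchain_community, and the sed workaround that force-pinned triton==3.1.0 in HabanaAI's vllm-fork requirements is dropped from every workflow and script that builds the Gaudi vLLM image. To reproduce the pinned Python environment locally, a minimal sketch (assuming a plain pip virtualenv; not part of this commit):

    # Install exactly the versions this commit pins for DocSum/FaqGen
    pip install "huggingface_hub==0.27.1" "langchain==0.3.14" "langchain_community==0.3.14"

    # Confirm the resolved versions match the pins
    python -c "import huggingface_hub, langchain, langchain_community; print(huggingface_hub.__version__, langchain.__version__, langchain_community.__version__)"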

.github/workflows/_comps-workflow.yml

Lines changed: 0 additions & 1 deletion

@@ -71,7 +71,6 @@ jobs:
           fi
           if [[ $(grep -c "vllm-gaudi:" ${docker_compose_yml}) != 0 ]]; then
             git clone --depth 1 --branch v0.6.4.post2+Gaudi-1.19.0 https://github.com/HabanaAI/vllm-fork.git
-            sed -i 's/triton/triton==3.1.0/g' vllm-fork/requirements-hpu.txt
           fi
       - name: Get build list
         id: get-build-list
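The deleted line was a workaround: before building the Gaudi image, it rewrote the bare triton entry in the fork's requirements-hpu.txt to triton==3.1.0. Note that the old sed replaced every occurrence of the substring "triton", so it would also have rewritten any entry that merely contained it. The workflow now trusts the fork's own requirements at tag v0.6.4.post2+Gaudi-1.19.0; a one-off check that the upstream pin is what gets built (a sketch, not part of the workflow):

    # Clone the same tag the workflow uses and inspect its triton requirement
    git clone --depth 1 --branch v0.6.4.post2+Gaudi-1.19.0 https://github.com/HabanaAI/vllm-fork.git
    grep -n "triton" vllm-fork/requirements-hpu.txt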

.github/workflows/push-image-build.yml

Lines changed: 0 additions & 1 deletion

@@ -96,7 +96,6 @@ jobs:
           fi
           if [[ $(grep -c "vllm-gaudi:" ${docker_compose_path}) != 0 ]]; then
             git clone --depth 1 --branch v0.6.4.post2+Gaudi-1.19.0 https://github.com/HabanaAI/vllm-fork.git
-            sed -i 's/triton/triton==3.1.0/g' vllm-fork/requirements-hpu.txt
           fi

       - name: Build Image

comps/llms/src/doc-summarization/requirements.txt

Lines changed: 3 additions & 3 deletions

@@ -1,11 +1,11 @@
 docarray[full]
 fastapi
 httpx==0.27.2
-huggingface_hub
-langchain #==0.1.12
+huggingface_hub==0.27.1
+langchain==0.3.14
 langchain-huggingface
 langchain-openai
-langchain_community
+langchain_community==0.3.14
 langchainhub
 opentelemetry-api
 opentelemetry-exporter-otlp
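The previously commented hint (langchain #==0.1.12) becomes a hard pin, and langchain and langchain_community are pinned to the same 0.3.14 release so the two packages stay in step; the identical pins are applied to the faq-generation requirements below. To sanity-check that the pinned set resolves without conflicts, a sketch (pip's --dry-run flag needs pip >= 22.2, and the path assumes a repo checkout):

    # Resolve the pinned requirements without installing anything
    pip install --dry-run -r comps/llms/src/doc-summarization/requirements.txt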

comps/llms/src/faq-generation/requirements.txt

Lines changed: 3 additions & 3 deletions

@@ -1,10 +1,10 @@
 docarray[full]
 fastapi
-huggingface_hub
-langchain
+huggingface_hub==0.27.1
+langchain==0.3.14
 langchain-huggingface
 langchain-openai
-langchain_community
+langchain_community==0.3.14
 langchainhub
 opentelemetry-api
 opentelemetry-exporter-otlp

comps/third_parties/vllm/src/build_docker_vllm.sh

Lines changed: 0 additions & 1 deletion

@@ -38,7 +38,6 @@ if [ "$hw_mode" = "hpu" ]; then
     git clone https://github.com/HabanaAI/vllm-fork.git
     cd ./vllm-fork/
     git checkout v0.6.4.post2+Gaudi-1.19.0
-    sed -i 's/triton/triton==3.1.0/g' requirements-hpu.txt
     docker build -f Dockerfile.hpu -t opea/vllm-gaudi:latest --shm-size=128g . --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy
     cd ..
     rm -rf vllm-fork
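With the sed gone, the HPU branch of the build script is reduced to clone, checkout, and docker build. A usage sketch (passing hpu as the positional argument is an assumption inferred from the $hw_mode check above, not confirmed by this diff):

    # Build the Gaudi vLLM image via the updated script
    # ("hpu" as the hardware-mode argument is assumed from the $hw_mode check)
    bash comps/third_parties/vllm/src/build_docker_vllm.sh hpu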

tests/agent/test_agent_langchain_on_intel_hpu.sh

Lines changed: 0 additions & 1 deletion

@@ -57,7 +57,6 @@ function build_vllm_docker_images() {
     fi
     cd ./vllm-fork
     git checkout v0.6.4.post2+Gaudi-1.19.0
-    sed -i 's/triton/triton==3.1.0/g' requirements-hpu.txt
     docker build --no-cache -f Dockerfile.hpu -t opea/vllm-gaudi:comps --shm-size=128g . --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy
     if [ $? -ne 0 ]; then
         echo "opea/vllm-gaudi:comps failed"

tests/guardrails/test_guardrails_hallucination_detection_on_intel_hpu.sh

Lines changed: 0 additions & 1 deletion

@@ -13,7 +13,6 @@ function build_docker_images() {
     git clone https://github.com/HabanaAI/vllm-fork.git
     cd vllm-fork/
     git checkout v0.6.4.post2+Gaudi-1.19.0
-    sed -i 's/triton/triton==3.1.0/g' requirements-hpu.txt
     docker build --no-cache --build-arg https_proxy=$https_proxy --build-arg http_proxy=$http_proxy -f Dockerfile.hpu -t opea/vllm-gaudi:comps --shm-size=128g .
     if [ $? -ne 0 ]; then
         echo "opea/vllm-gaudi built fail"

tests/llms/test_llms_doc-summarization_vllm_on_intel_hpu.sh

Lines changed: 0 additions & 1 deletion

@@ -20,7 +20,6 @@ function build_docker_images() {
     git clone https://github.com/HabanaAI/vllm-fork.git
     cd vllm-fork/
     git checkout v0.6.4.post2+Gaudi-1.19.0
-    sed -i 's/triton/triton==3.1.0/g' requirements-hpu.txt
     docker build --no-cache -f Dockerfile.hpu -t ${REGISTRY:-opea}/vllm-gaudi:${TAG:-latest} --shm-size=128g .
     if [ $? -ne 0 ]; then
         echo "opea/vllm-gaudi built fail"

tests/llms/test_llms_faq-generation_vllm_on_intel_hpu.sh

Lines changed: 0 additions & 1 deletion

@@ -20,7 +20,6 @@ function build_docker_images() {
     git clone https://github.com/HabanaAI/vllm-fork.git
     cd vllm-fork/
     git checkout v0.6.4.post2+Gaudi-1.19.0
-    sed -i 's/triton/triton==3.1.0/g' requirements-hpu.txt
     docker build --no-cache -f Dockerfile.hpu -t ${REGISTRY:-opea}/vllm-gaudi:${TAG:-latest} --shm-size=128g .
     if [ $? -ne 0 ]; then
         echo "opea/vllm-gaudi built fail"

tests/llms/test_llms_text-generation_service_vllm_on_intel_hpu.sh

Lines changed: 0 additions & 1 deletion

@@ -20,7 +20,6 @@ function build_docker_images() {
     git clone https://github.com/HabanaAI/vllm-fork.git
     cd vllm-fork/
     git checkout v0.6.4.post2+Gaudi-1.19.0
-    sed -i 's/triton/triton==3.1.0/g' requirements-hpu.txt
     docker build --no-cache -f Dockerfile.hpu -t ${REGISTRY:-opea}/vllm-gaudi:${TAG:-latest} --shm-size=128g .
     if [ $? -ne 0 ]; then
         echo "opea/vllm-gaudi built fail"
