diff --git a/example-apps/chatbot-rag-app/README.md b/example-apps/chatbot-rag-app/README.md
index c8940731..a3a1e4da 100644
--- a/example-apps/chatbot-rag-app/README.md
+++ b/example-apps/chatbot-rag-app/README.md
@@ -51,14 +51,14 @@ ES_INDEX_CHAT_HISTORY=workplace-app-docs-chat-history
 
 ## Connecting to LLM
 
-We support three LLM providers: Azure, OpenAI and Bedrock.
-
-To use one of them, you need to set the `LLM_TYPE` environment variable:
+We support several LLM providers. To use one of them, you need to set the `LLM_TYPE` environment variable. For example:
 
 ```sh
 export LLM_TYPE=azure
 ```
 
+The following sub-sections define the configuration requirements of each supported LLM.
+
 ### OpenAI
 
 To use OpenAI LLM, you will need to provide the OpenAI key via `OPENAI_API_KEY` environment variable:
@@ -72,7 +72,7 @@ You can get your OpenAI key from the [OpenAI dashboard](https://platform.openai.
 
 ### Azure OpenAI
 
-If you are using Azure LLM, you will need to set the following environment variables:
+If you want to use Azure LLM, you will need to set the following environment variables:
 
 ```sh
 export LLM_TYPE=azure
@@ -84,7 +84,7 @@ export OPENAI_ENGINE=... # deployment name in Azure
 
 ### Bedrock LLM
 
-To use Bedrock LLM you need to set the following environment variables in order to AWS.
+To use Bedrock LLM you need to set the following environment variables in order to authenticate to AWS.
 
 ```sh
 export LLM_TYPE=bedrock
@@ -108,7 +108,7 @@ region=...
 
 ### Vertex AI
 
-To use Vertex AI you need to set the following environment variables. More infos [here](https://python.langchain.com/docs/integrations/llms/google_vertex_ai_palm).
+To use Vertex AI you need to set the following environment variables. More information [here](https://python.langchain.com/docs/integrations/llms/google_vertex_ai_palm).
 
 ```sh
 export LLM_TYPE=vertex
@@ -117,6 +117,17 @@ export VERTEX_REGION= # Default is us-central1
 export GOOGLE_APPLICATION_CREDENTIALS=
 ```
 
+### Mistral AI
+
+To use Mistral AI you need to set the following environment variables:
+
+```sh
+export LLM_TYPE=mistral
+export MISTRAL_API_KEY=...
+export MISTRAL_API_ENDPOINT=... # optional
+export MISTRAL_MODEL=... # optional
+```
+
 ## Running the App
 
 Once you have indexed data into the Elasticsearch index, there are two ways to run the app: via Docker or locally. Docker is advised for testing & production use. Locally is advised for development.
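For orientation, the Mistral variables documented above map directly onto the `ChatMistralAI` constructor used by the app (see the `llm_integrations.py` diff below). A minimal standalone sketch, assuming `langchain-mistralai` is installed and `MISTRAL_API_KEY` is exported; the fallback model name is an illustrative placeholder, not a documented default:

```python
import os

from langchain_core.messages import HumanMessage
from langchain_mistralai.chat_models import ChatMistralAI

# Required key, optional model override -- mirrors the README variables.
chat = ChatMistralAI(
    mistral_api_key=os.environ["MISTRAL_API_KEY"],
    model=os.getenv("MISTRAL_MODEL", "mistral-small"),  # placeholder fallback
    temperature=0.7,
)

# .invoke() returns an AIMessage; .content holds the reply text.
print(chat.invoke([HumanMessage(content="Say hello!")]).content)
```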
diff --git a/example-apps/chatbot-rag-app/api/llm_integrations.py b/example-apps/chatbot-rag-app/api/llm_integrations.py
index 8c20fe27..d6f4bb46 100644
--- a/example-apps/chatbot-rag-app/api/llm_integrations.py
+++ b/example-apps/chatbot-rag-app/api/llm_integrations.py
@@ -1,4 +1,11 @@
-from langchain.chat_models import ChatOpenAI, ChatVertexAI, AzureChatOpenAI, BedrockChat
+from langchain_community.chat_models import (
+    ChatOpenAI,
+    ChatVertexAI,
+    AzureChatOpenAI,
+    BedrockChat,
+)
+from langchain_core.messages import HumanMessage
+from langchain_mistralai.chat_models import ChatMistralAI
 import os
 import vertexai
 import boto3
@@ -54,11 +61,27 @@ def init_bedrock(temperature):
     )
 
 
+def init_mistral_chat(temperature):
+    MISTRAL_API_ENDPOINT = os.getenv("MISTRAL_API_ENDPOINT")
+    MISTRAL_API_KEY = os.getenv("MISTRAL_API_KEY")
+    MISTRAL_MODEL = os.getenv("MISTRAL_MODEL")
+    kwargs = {
+        "mistral_api_key": MISTRAL_API_KEY,
+        "temperature": temperature,
+    }
+    if MISTRAL_API_ENDPOINT:
+        kwargs["endpoint"] = MISTRAL_API_ENDPOINT
+    if MISTRAL_MODEL:
+        kwargs["model"] = MISTRAL_MODEL
+    return ChatMistralAI(**kwargs)
+
+
 MAP_LLM_TYPE_TO_CHAT_MODEL = {
     "azure": init_azure_chat,
     "bedrock": init_bedrock,
     "openai": init_openai_chat,
     "vertex": init_vertex_chat,
+    "mistral": init_mistral_chat,
 }
 
 
diff --git a/example-apps/chatbot-rag-app/env.example b/example-apps/chatbot-rag-app/env.example
index 037740f9..cb8081c1 100644
--- a/example-apps/chatbot-rag-app/env.example
+++ b/example-apps/chatbot-rag-app/env.example
@@ -34,3 +34,9 @@ ES_INDEX_CHAT_HISTORY=workplace-app-docs-chat-history
 # VERTEX_PROJECT_ID=
 # VERTEX_REGION=
 # GOOGLE_APPLICATION_CREDENTIALS=
+
+# Uncomment and complete if you want to use Mistral AI
+# LLM_TYPE=mistral
+# MISTRAL_API_KEY=
+# MISTRAL_API_ENDPOINT=
+# MISTRAL_MODEL=
diff --git a/example-apps/chatbot-rag-app/requirements.in b/example-apps/chatbot-rag-app/requirements.in
index b7a615bf..840c43ae 100644
--- a/example-apps/chatbot-rag-app/requirements.in
+++ b/example-apps/chatbot-rag-app/requirements.in
@@ -20,6 +20,9 @@ grpcio-status
 # BedRock dependencies
 boto3
 
+# Mistral dependencies
+langchain-mistralai
+
 # TBD if these are still needed
 exceptiongroup
 importlib-metadata
diff --git a/example-apps/chatbot-rag-app/requirements.txt b/example-apps/chatbot-rag-app/requirements.txt
index feedebbb..d5129714 100644
--- a/example-apps/chatbot-rag-app/requirements.txt
+++ b/example-apps/chatbot-rag-app/requirements.txt
@@ -133,13 +133,13 @@ jsonpatch==1.33
 #   langchain-core
 jsonpointer==2.4
     # via jsonpatch
-langchain==0.0.333
+langchain==0.1.9
     # via -r requirements.in
 langchain-core==0.1.23
     # via langchain-elasticsearch
 langchain-elasticsearch==0.1.0
     # via -r requirements.in
-langsmith==0.0.87
+langsmith==0.1.10
     # via
     #   langchain
     #   langchain-core
@@ -195,12 +195,12 @@ pyasn1==0.5.0
 #   rsa
 pyasn1-modules==0.3.0
     # via google-auth
-pydantic==2.3.0
+pydantic==2.5.2
     # via
     #   langchain
     #   langchain-core
     #   langsmith
-pydantic-core==2.6.3
+pydantic-core==2.14.5
     # via pydantic
 pyproject-hooks==1.0.0
     # via build
@@ -268,6 +268,8 @@ yarl==1.9.2
 zipp==3.17.0
     # via importlib-metadata
+langchain-mistralai==0.0.5
+    # via -r requirements.in
 
 # The following packages are considered to be unsafe in a requirements file:
 # pip
 # setuptools
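Putting the pieces together: `LLM_TYPE` is used as a key into `MAP_LLM_TYPE_TO_CHAT_MODEL`, so adding a provider only requires a new init function plus one map entry, as this change does for Mistral. A minimal sketch of that dispatch, assuming the map defined above is in scope; the `get_llm` wrapper name is hypothetical, since the diff does not show the module's actual entry point:

```python
import os

def get_llm(temperature=0):
    # Hypothetical wrapper: resolve the provider named by LLM_TYPE
    # to its init function and build the provider-specific chat model.
    llm_type = os.getenv("LLM_TYPE", "openai")
    if llm_type not in MAP_LLM_TYPE_TO_CHAT_MODEL:
        raise ValueError(f"Unsupported LLM_TYPE: {llm_type}")
    return MAP_LLM_TYPE_TO_CHAT_MODEL[llm_type](temperature=temperature)

# With LLM_TYPE=mistral exported, this routes through init_mistral_chat.
```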