diff --git a/ProductivitySuite/README.md b/ProductivitySuite/README.md index a5eb5735ec..dc3f233a9f 100644 --- a/ProductivitySuite/README.md +++ b/ProductivitySuite/README.md @@ -38,10 +38,7 @@ flowchart LR direction LR LLM_CG([LLM MicroService]):::blue end - subgraph FaqGen-MegaService["FaqGen MegaService "] - direction LR - LLM_F([LLM MicroService]):::blue - end + subgraph UserInterface[" User Interface "] direction LR a([User Input Query]):::orchid @@ -63,7 +60,7 @@ flowchart LR LLM_gen_CG{{LLM Service
}} GW_CG([CodeGen GateWay
]):::orange LLM_gen_F{{LLM Service
}} - GW_F([FaqGen GateWay
]):::orange + PR([Prompt Registry MicroService]):::blue CH([Chat History MicroService]):::blue MDB{{Mongo DB
}} @@ -118,11 +115,6 @@ flowchart LR direction LR LLM_CG <-.-> LLM_gen_CG - %% Questions interaction - direction LR - UI --> GW_F - GW_F <==> FaqGen-MegaService - %% Embedding service flow direction LR @@ -158,10 +150,6 @@ Engage in intelligent conversations with your documents using our advanced **Ret Summarize lengthy documents or articles, enabling you to grasp key takeaways quickly. Save time and effort with our intelligent summarization feature! -### ❓ FAQ Generation - -Effortlessly create comprehensive FAQs based on your documents. Ensure your users have access to the information they need with minimal effort! - ### 💻 Code Generation Boost your coding productivity by providing a description of the functionality you require. Our application generates corresponding code snippets, saving you valuable time and effort! diff --git a/ProductivitySuite/assets/img/Login_page.png b/ProductivitySuite/assets/img/Login_page.png index 0c325e5f2e..7d56b16728 100644 Binary files a/ProductivitySuite/assets/img/Login_page.png and b/ProductivitySuite/assets/img/Login_page.png differ diff --git a/ProductivitySuite/assets/img/chat_qna_init.png b/ProductivitySuite/assets/img/chat_qna_init.png index c13630594b..1179175fb6 100644 Binary files a/ProductivitySuite/assets/img/chat_qna_init.png and b/ProductivitySuite/assets/img/chat_qna_init.png differ diff --git a/ProductivitySuite/assets/img/chatqna_with_conversation.png b/ProductivitySuite/assets/img/chatqna_with_conversation.png index 1dad3a099e..0fcd4c7921 100644 Binary files a/ProductivitySuite/assets/img/chatqna_with_conversation.png and b/ProductivitySuite/assets/img/chatqna_with_conversation.png differ diff --git a/ProductivitySuite/assets/img/codegen.png b/ProductivitySuite/assets/img/codegen.png index a4d38f6f42..70620a448b 100644 Binary files a/ProductivitySuite/assets/img/codegen.png and b/ProductivitySuite/assets/img/codegen.png differ diff --git a/ProductivitySuite/assets/img/data_source.png b/ProductivitySuite/assets/img/data_source.png index ae45e12238..6afe20afd4 100644 Binary files a/ProductivitySuite/assets/img/data_source.png and b/ProductivitySuite/assets/img/data_source.png differ diff --git a/ProductivitySuite/assets/img/doc_summary.png b/ProductivitySuite/assets/img/doc_summary.png new file mode 100644 index 0000000000..f279606f38 Binary files /dev/null and b/ProductivitySuite/assets/img/doc_summary.png differ diff --git a/ProductivitySuite/docker_compose/intel/cpu/xeon/README.md b/ProductivitySuite/docker_compose/intel/cpu/xeon/README.md index 8191b7cc70..af63141330 100644 --- a/ProductivitySuite/docker_compose/intel/cpu/xeon/README.md +++ b/ProductivitySuite/docker_compose/intel/cpu/xeon/README.md @@ -108,68 +108,46 @@ Since the `compose.yaml` will consume some environment variables, you need to se export host_ip="External_Public_IP" ``` -**Export the value of your Huggingface API token to the `your_hf_api_token` environment variable** +**Export the value of your Huggingface API token to the `HUGGINGFACEHUB_API_TOKEN` environment variable** > Replace Your_Huggingface_API_Token below with your actual Huggingface API token value ``` -export your_hf_api_token="Your_Huggingface_API_Token" +export HUGGINGFACEHUB_API_TOKEN="Your_Huggingface_API_Token" ``` **Append the value of the public IP address to the no_proxy list** ``` -export your_no_proxy=${your_no_proxy},"External_Public_IP" +export no_proxy=${no_proxy},"External_Public_IP" ``` ```bash -export MONGO_HOST=${host_ip} -export MONGO_PORT=27017 -export DB_NAME="test" -export
COLLECTION_NAME="Conversations" +export DB_NAME="opea" export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5" export RERANK_MODEL_ID="BAAI/bge-reranker-base" export LLM_MODEL_ID="Intel/neural-chat-7b-v3-3" export LLM_MODEL_ID_CODEGEN="meta-llama/CodeLlama-7b-hf" -export TEI_EMBEDDING_ENDPOINT="http://${host_ip}:6006" -export TEI_RERANKING_ENDPOINT="http://${host_ip}:8808" -export TGI_LLM_ENDPOINT="http://${host_ip}:9009" -export REDIS_URL="redis://${host_ip}:6379" export INDEX_NAME="rag-redis" -export HUGGINGFACEHUB_API_TOKEN=${your_hf_api_token} -export MEGA_SERVICE_HOST_IP=${host_ip} -export EMBEDDING_SERVICE_HOST_IP=${host_ip} -export RETRIEVER_SERVICE_HOST_IP=${host_ip} -export RERANK_SERVICE_HOST_IP=${host_ip} -export LLM_SERVICE_HOST_IP=${host_ip} -export LLM_SERVICE_HOST_IP_DOCSUM=${host_ip} -export LLM_SERVICE_HOST_IP_FAQGEN=${host_ip} -export LLM_SERVICE_HOST_IP_CODEGEN=${host_ip} -export LLM_SERVICE_HOST_IP_CHATQNA=${host_ip} -export TGI_LLM_ENDPOINT_CHATQNA="http://${host_ip}:9009" -export TGI_LLM_ENDPOINT_CODEGEN="http://${host_ip}:8028" -export TGI_LLM_ENDPOINT_FAQGEN="http://${host_ip}:9009" -export TGI_LLM_ENDPOINT_DOCSUM="http://${host_ip}:9009" +export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN} export BACKEND_SERVICE_ENDPOINT_CHATQNA="http://${host_ip}:8888/v1/chatqna" -export DATAPREP_DELETE_FILE_ENDPOINT="http://${host_ip}:5000/v1/dataprep/delete" +export DATAPREP_DELETE_FILE_ENDPOINT="http://${host_ip}:6007/v1/dataprep/delete" export BACKEND_SERVICE_ENDPOINT_CODEGEN="http://${host_ip}:7778/v1/codegen" export BACKEND_SERVICE_ENDPOINT_DOCSUM="http://${host_ip}:8890/v1/docsum" -export DATAPREP_SERVICE_ENDPOINT="http://${host_ip}:5000/v1/dataprep/ingest" -export DATAPREP_GET_FILE_ENDPOINT="http://${host_ip}:5000/v1/dataprep/get" +export DATAPREP_SERVICE_ENDPOINT="http://${host_ip}:6007/v1/dataprep/ingest" +export DATAPREP_GET_FILE_ENDPOINT="http://${host_ip}:6007/v1/dataprep/get" export CHAT_HISTORY_CREATE_ENDPOINT="http://${host_ip}:6012/v1/chathistory/create" export CHAT_HISTORY_CREATE_ENDPOINT="http://${host_ip}:6012/v1/chathistory/create" export CHAT_HISTORY_DELETE_ENDPOINT="http://${host_ip}:6012/v1/chathistory/delete" export CHAT_HISTORY_GET_ENDPOINT="http://${host_ip}:6012/v1/chathistory/get" export PROMPT_SERVICE_GET_ENDPOINT="http://${host_ip}:6018/v1/prompt/get" export PROMPT_SERVICE_CREATE_ENDPOINT="http://${host_ip}:6018/v1/prompt/create" +export PROMPT_SERVICE_DELETE_ENDPOINT="http://${host_ip}:6018/v1/prompt/delete" export KEYCLOAK_SERVICE_ENDPOINT="http://${host_ip}:8080" -export LLM_SERVICE_HOST_PORT_FAQGEN=9002 -export LLM_SERVICE_HOST_PORT_CODEGEN=9001 -export LLM_SERVICE_HOST_PORT_DOCSUM=9003 -export PROMPT_COLLECTION_NAME="prompt" -export RERANK_SERVER_PORT=8808 -export EMBEDDING_SERVER_PORT=6006 -export LLM_SERVER_PORT=9009 +export DocSum_COMPONENT_NAME="OpeaDocSumTgi" + +# Set no_proxy +export no_proxy="$no_proxy,tgi_service_codegen,llm_codegen,tei-embedding-service,tei-reranking-service,chatqna-xeon-backend-server,retriever,tgi-service,redis-vector-db,whisper,llm-docsum-tgi,docsum-xeon-backend-server,mongo,codegen" ``` Note: Please replace `host_ip` with your external IP address; do not use localhost. @@ -203,16 +181,7 @@ Please refer to **[keycloak_setup_guide](keycloak_setup_guide.md)** for more det -H 'Content-Type: application/json' ``` -2. Embedding Microservice - - ```bash - curl http://${host_ip}:6000/v1/embeddings\ - -X POST \ - -d '{"text":"hello"}' \ - -H 'Content-Type: application/json' - ``` - -3.
Retriever Microservice To consume the retriever microservice, you need to generate a mock embedding vector by Python script. The length of embedding vector is determined by the embedding model. @@ -222,13 +191,13 @@ Please refer to **[keycloak_setup_guide](keycloak_setup_guide.md)** for more det ```bash export your_embedding=$(python3 -c "import random; embedding = [random.uniform(-1, 1) for _ in range(768)]; print(embedding)") - curl http://${host_ip}:7000/v1/retrieval \ + curl http://${host_ip}:7001/v1/retrieval \ -X POST \ -d "{\"text\":\"test\",\"embedding\":${your_embedding}}" \ -H 'Content-Type: application/json' ``` -4. TEI Reranking Service +3. TEI Reranking Service ```bash curl http://${host_ip}:8808/rerank \ @@ -237,16 +206,7 @@ Please refer to **[keycloak_setup_guide](keycloak_setup_guide.md)** for more det -H 'Content-Type: application/json' ``` -5. Reranking Microservice - - ```bash - curl http://${host_ip}:8000/v1/reranking\ - -X POST \ - -d '{"initial_query":"What is Deep Learning?", "retrieved_docs": [{"text":"Deep Learning is not..."}, {"text":"Deep learning is..."}]}' \ - -H 'Content-Type: application/json' - ``` - -6. LLM backend Service (ChatQnA, DocSum, FAQGen) +4. LLM backend Service (ChatQnA, DocSum) ```bash curl http://${host_ip}:9009/generate \ @@ -255,7 +215,7 @@ Please refer to **[keycloak_setup_guide](keycloak_setup_guide.md)** for more det -H 'Content-Type: application/json' ``` -7. LLM backend Service (CodeGen) +5. LLM backend Service (CodeGen) ```bash curl http://${host_ip}:8028/generate \ @@ -264,59 +224,42 @@ Please refer to **[keycloak_setup_guide](keycloak_setup_guide.md)** for more det -H 'Content-Type: application/json' ``` -8. ChatQnA LLM Microservice +6. CodeGen LLM Microservice ```bash - curl http://${host_ip}:9000/v1/chat/completions\ + curl http://${host_ip}:9001/v1/chat/completions\ -X POST \ - -d '{"query":"What is Deep Learning?","max_tokens":17,"top_k":10,"top_p":0.95,"typical_p":0.95,"temperature":0.01,"repetition_penalty":1.03,"stream":true}' \ + -d '{"query":"def print_hello_world():"}' \ -H 'Content-Type: application/json' ``` -9. CodeGen LLM Microservice +7. DocSum LLM Microservice ```bash - curl http://${host_ip}:9001/v1/chat/completions\ + curl http://${host_ip}:9003/v1/docsum\ -X POST \ - -d '{"query":"def print_hello_world():"}' \ + -d '{"messages":"Text Embeddings Inference (TEI) is a toolkit for deploying and serving open source text embeddings and sequence classification models. TEI enables high-performance extraction for the most popular models, including FlagEmbedding, Ember, GTE and E5", "type": "text"}' \ -H 'Content-Type: application/json' ``` -10. DocSum LLM Microservice - - ```bash - curl http://${host_ip}:9003/v1/docsum\ - -X POST \ - -d '{"query":"Text Embeddings Inference (TEI) is a toolkit for deploying and serving open source text embeddings and sequence classification models. TEI enables high-performance extraction for the most popular models, including FlagEmbedding, Ember, GTE and E5"}' \ - -H 'Content-Type: application/json' - ``` - -11. FAQGen LLM Microservice +8. ChatQnA MegaService - ```bash - curl http://${host_ip}:9002/v1/faqgen\ - -X POST \ - -d '{"query":"Text Embeddings Inference (TEI) is a toolkit for deploying and serving open source text embeddings and sequence classification models. TEI enables high-performance extraction for the most popular models, including FlagEmbedding, Ember, GTE and E5"}' \ - -H 'Content-Type: application/json' - ``` - -12. 
ChatQnA MegaService - - ```bash - curl http://${host_ip}:8888/v1/chatqna -H "Content-Type: application/json" -d '{ - "messages": "What is the revenue of Nike in 2023?" - }' - ``` + ```bash + curl http://${host_ip}:8888/v1/chatqna -H "Content-Type: application/json" -d '{ + "messages": "What is the revenue of Nike in 2023?" + }' + ``` -13. DocSum MegaService +9. DocSum MegaService - ```bash - curl http://${host_ip}:8890/v1/docsum -H "Content-Type: application/json" -d '{ - "messages": "Text Embeddings Inference (TEI) is a toolkit for deploying and serving open source text embeddings and sequence classification models. TEI enables high-performance extraction for the most popular models, including FlagEmbedding, Ember, GTE and E5." - }' - ``` + ```bash + curl http://${host_ip}:8890/v1/docsum -H "Content-Type: application/json" -d '{ + "messages": "Text Embeddings Inference (TEI) is a toolkit for deploying and serving open source text embeddings and sequence classification models. TEI enables high-performance extraction for the most popular models, including FlagEmbedding, Ember, GTE and E5.", + "type": "text" + }' + ``` -14. CodeGen MegaService +10. CodeGen MegaService ```bash curl http://${host_ip}:7778/v1/codegen -H "Content-Type: application/json" -d '{ @@ -324,7 +267,7 @@ Please refer to **[keycloak_setup_guide](keycloak_setup_guide.md)** for more det }' ``` -15. Dataprep Microservice +11. Dataprep Microservice If you want to update the default knowledge base, you can use the following commands: @@ -374,13 +317,13 @@ Please refer to **[keycloak_setup_guide](keycloak_setup_guide.md)** for more det -H "Content-Type: application/json" ``` -16. Prompt Registry Microservice +12. Prompt Registry Microservice If you want to update the default Prompts in the application for your user, you can use the following commands: ```bash curl -X 'POST' \ - http://{host_ip}:6018/v1/prompt/create \ + "http://${host_ip}:6018/v1/prompt/create" \ -H 'accept: application/json' \ -H 'Content-Type: application/json' \ -d '{ @@ -392,14 +335,14 @@ Please refer to **[keycloak_setup_guide](keycloak_setup_guide.md)** for more det ```bash curl -X 'POST' \ - http://{host_ip}:6018/v1/prompt/get \ + "http://${host_ip}:6018/v1/prompt/get" \ -H 'accept: application/json' \ -H 'Content-Type: application/json' \ -d '{ "user": "test"}' curl -X 'POST' \ - http://{host_ip}:6018/v1/prompt/get \ + "http://${host_ip}:6018/v1/prompt/get" \ -H 'accept: application/json' \ -H 'Content-Type: application/json' \ -d '{ @@ -410,14 +353,14 @@ Please refer to **[keycloak_setup_guide](keycloak_setup_guide.md)** for more det ```bash curl -X 'POST' \ - http://{host_ip}:6018/v1/prompt/delete \ + "http://${host_ip}:6018/v1/prompt/delete" \ -H 'accept: application/json' \ -H 'Content-Type: application/json' \ -d '{ "user": "test", "prompt_id":"{prompt_id to be deleted}"}' ``` -17. Chat History Microservice +13. Chat History Microservice To validate the chatHistory Microservice, you can use the following commands. @@ -527,15 +470,4 @@ Here're some of the project's features: #### Screenshots -![project-screenshot](../../../../assets/img/doc_summary_paste.png) -![project-screenshot](../../../../assets/img/doc_summary_file.png) - -### ❓ FAQ Generator - -- **Generate FAQs from Text via Pasting**: Paste the text to into the text box, then click 'Generate FAQ' to produce a condensed FAQ of the content, which will be displayed in the 'FAQ' box below. 
- -- **Generate FAQs from Text via txt file Upload**: Upload the file in the Upload bar, then click 'Generate FAQ' to produce a condensed FAQ of the content, which will be displayed in the 'FAQ' box below. - -#### Screenshots - -![project-screenshot](../../../../assets/img/faq_generator.png) +![project-screenshot](../../../../assets/img/doc_summary.png) diff --git a/ProductivitySuite/docker_compose/intel/cpu/xeon/compose.yaml b/ProductivitySuite/docker_compose/intel/cpu/xeon/compose.yaml index 2a4351d5da..00a16c1670 100644 --- a/ProductivitySuite/docker_compose/intel/cpu/xeon/compose.yaml +++ b/ProductivitySuite/docker_compose/intel/cpu/xeon/compose.yaml @@ -25,10 +25,10 @@ services: no_proxy: ${no_proxy} http_proxy: ${http_proxy} https_proxy: ${https_proxy} - REDIS_URL: ${REDIS_URL} + REDIS_URL: redis://redis-vector-db:6379 REDIS_HOST: redis-vector-db INDEX_NAME: ${INDEX_NAME} - TEI_EMBEDDING_ENDPOINT: ${TEI_EMBEDDING_ENDPOINT} + TEI_EMBEDDING_ENDPOINT: http://tei-embedding-service:80 HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN} DATAPREP_TYPE: ${DATAPREP_TYPE} LOGFLAG: ${LOGFLAG} @@ -57,39 +57,22 @@ services: interval: 10s timeout: 10s retries: 60 - embedding: - image: ${REGISTRY:-opea}/embedding:${TAG:-latest} - container_name: embedding-server - depends_on: - tei-embedding-service: - condition: service_healthy - ports: - - "6000:6000" - ipc: host - environment: - no_proxy: ${no_proxy} - http_proxy: ${http_proxy} - https_proxy: ${https_proxy} - TEI_EMBEDDING_ENDPOINT: ${TEI_EMBEDDING_ENDPOINT} - HF_TOKEN: ${HUGGINGFACEHUB_API_TOKEN} - LOGFLAG: ${LOGFLAG} - restart: unless-stopped retriever: image: ${REGISTRY:-opea}/retriever:${TAG:-latest} container_name: retriever-redis-server depends_on: - redis-vector-db ports: - - "7000:7000" + - "7001:7000" ipc: host environment: no_proxy: ${no_proxy} http_proxy: ${http_proxy} https_proxy: ${https_proxy} - REDIS_URL: ${REDIS_URL} + REDIS_URL: redis://redis-vector-db:6379 REDIS_HOST: redis-vector-db INDEX_NAME: ${INDEX_NAME} - TEI_EMBEDDING_ENDPOINT: ${TEI_EMBEDDING_ENDPOINT} + TEI_EMBEDDING_ENDPOINT: http://tei-embedding-service:80 HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN} LOGFLAG: ${LOGFLAG} RETRIEVER_COMPONENT_NAME: "OPEA_RETRIEVER_REDIS" @@ -116,27 +99,7 @@ services: interval: 10s timeout: 10s retries: 60 - reranking: - image: ${REGISTRY:-opea}/reranking:${TAG:-latest} - container_name: reranking-tei-xeon-server - depends_on: - tei-reranking-service: - condition: service_healthy - ports: - - "8000:8000" - ipc: host - environment: - no_proxy: ${no_proxy} - http_proxy: ${http_proxy} - https_proxy: ${https_proxy} - RERANK_TYPE: ${RERANK_TYPE} - TEI_RERANKING_ENDPOINT: ${TEI_RERANKING_ENDPOINT} - HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN} - HF_HUB_DISABLE_PROGRESS_BARS: 1 - HF_HUB_ENABLE_HF_TRANSFER: 0 - LOGFLAG: ${LOGFLAG} - restart: unless-stopped - tgi_service: + tgi-service: image: ghcr.io/huggingface/text-generation-inference:2.4.0-intel-cpu container_name: tgi-service ports: @@ -158,26 +121,6 @@ services: timeout: 10s retries: 100 command: --model-id ${LLM_MODEL_ID} --cuda-graphs 0 - llm: - image: ${REGISTRY:-opea}/llm-textgen:${TAG:-latest} - container_name: llm-textgen-server - depends_on: - tgi_service: - condition: service_healthy - ports: - - "9000:9000" - ipc: host - environment: - no_proxy: ${no_proxy} - http_proxy: ${http_proxy} - https_proxy: ${https_proxy} - LLM_ENDPOINT: ${TGI_LLM_ENDPOINT_CHATQNA} - LLM_MODEL_ID: ${LLM_MODEL_ID} - HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN} - 
HF_HUB_DISABLE_PROGRESS_BARS: 1 - HF_HUB_ENABLE_HF_TRANSFER: 0 - LOGFLAG: ${LOGFLAG} - restart: unless-stopped chatqna-xeon-backend-server: image: ${REGISTRY:-opea}/chatqna:${TAG:-latest} container_name: chatqna-xeon-backend-server @@ -192,27 +135,21 @@ services: condition: service_started tei-reranking-service: condition: service_healthy - tgi_service: + tgi-service: condition: service_healthy - embedding: - condition: service_started - reranking: - condition: service_started - llm: - condition: service_started ports: - "8888:8888" environment: no_proxy: ${no_proxy} https_proxy: ${https_proxy} http_proxy: ${http_proxy} - MEGA_SERVICE_HOST_IP: ${MEGA_SERVICE_HOST_IP} - EMBEDDING_SERVER_HOST_IP: ${EMBEDDING_SERVICE_HOST_IP} + MEGA_SERVICE_HOST_IP: chatqna-xeon-backend-server + EMBEDDING_SERVER_HOST_IP: tei-embedding-service EMBEDDING_SERVER_PORT: ${EMBEDDING_SERVER_PORT:-80} - RETRIEVER_SERVICE_HOST_IP: ${RETRIEVER_SERVICE_HOST_IP} - RERANK_SERVER_HOST_IP: ${RERANK_SERVICE_HOST_IP} + RETRIEVER_SERVICE_HOST_IP: retriever + RERANK_SERVER_HOST_IP: tei-reranking-service RERANK_SERVER_PORT: ${RERANK_SERVER_PORT:-80} - LLM_SERVER_HOST_IP: ${LLM_SERVICE_HOST_IP_CHATQNA} + LLM_SERVER_HOST_IP: tgi-service LLM_SERVER_PORT: ${LLM_SERVER_PORT:-80} LLM_MODEL: ${LLM_MODEL_ID} LOGFLAG: ${LOGFLAG} @@ -251,8 +188,8 @@ services: no_proxy: ${no_proxy} http_proxy: ${http_proxy} https_proxy: ${https_proxy} - LLM_ENDPOINT: ${TGI_LLM_ENDPOINT_CODEGEN} - LLM_MODEL_ID: ${LLM_MODEL_ID} + LLM_ENDPOINT: http://tgi_service_codegen:80 + LLM_MODEL_ID: ${LLM_MODEL_ID_CODEGEN} HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN} LOGFLAG: ${LOGFLAG} restart: unless-stopped @@ -260,7 +197,7 @@ services: image: ${REGISTRY:-opea}/codegen:${TAG:-latest} container_name: codegen-xeon-backend-server depends_on: - - llm + - llm_codegen ports: - "7778:7778" environment: @@ -268,33 +205,11 @@ services: https_proxy: ${https_proxy} http_proxy: ${http_proxy} MEGA_SERVICE_HOST_IP: ${MEGA_SERVICE_HOST_IP} - LLM_SERVICE_HOST_IP: ${LLM_SERVICE_HOST_IP_CODEGEN} - LLM_SERVICE_PORT: ${LLM_SERVICE_HOST_PORT_CODEGEN} + LLM_SERVICE_HOST_IP: llm_codegen + LLM_SERVICE_PORT: ${LLM_SERVICE_HOST_PORT_CODEGEN:-9000} LOGFLAG: ${LOGFLAG} ipc: host restart: always - llm_faqgen: - image: ${REGISTRY:-opea}/llm-faqgen:${TAG:-latest} - container_name: llm-faqgen-server - depends_on: - tgi_service: - condition: service_healthy - ports: - - "9002:9000" - ipc: host - environment: - no_proxy: ${no_proxy} - http_proxy: ${http_proxy} - https_proxy: ${https_proxy} - LLM_ENDPOINT: ${TGI_LLM_ENDPOINT_FAQGEN} - HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN} - LANGCHAIN_API_KEY: ${LANGCHAIN_API_KEY} - LANGCHAIN_TRACING_V2: ${LANGCHAIN_TRACING_V2} - LANGCHAIN_PROJECT: "opea-llm-service" - LLM_MODEL_ID: ${LLM_MODEL_ID} - FAQGen_COMPONENT_NAME: ${FAQGen_COMPONENT_NAME} - LOGFLAG: ${LOGFLAG:-False} - restart: unless-stopped mongo: image: mongo:7.0.11 container_name: mongodb @@ -316,9 +231,9 @@ services: http_proxy: ${http_proxy} no_proxy: ${no_proxy} https_proxy: ${https_proxy} - MONGO_HOST: ${MONGO_HOST} - MONGO_PORT: ${MONGO_PORT} - COLLECTION_NAME: ${COLLECTION_NAME} + MONGO_HOST: ${MONGO_HOST:-mongo} + MONGO_PORT: ${MONGO_PORT:-27017} + COLLECTION_NAME: ${COLLECTION_NAME:-Conversations} LOGFLAG: ${LOGFLAG} restart: unless-stopped @@ -332,9 +247,9 @@ services: http_proxy: ${http_proxy} https_proxy: ${https_proxy} no_proxy: ${no_proxy} - MONGO_HOST: ${MONGO_HOST} - MONGO_PORT: ${MONGO_PORT} - COLLECTION_NAME: ${PROMPT_COLLECTION_NAME} + MONGO_HOST: 
${MONGO_HOST:-mongo} + MONGO_PORT: ${MONGO_PORT:-27017} + COLLECTION_NAME: ${PROMPT_COLLECTION_NAME:-prompt} LOGFLAG: ${LOGFLAG} restart: unless-stopped keycloak: @@ -367,10 +282,64 @@ services: - APP_CHAT_HISTORY_GET_ENDPOINT=${CHAT_HISTORY_GET_ENDPOINT} - APP_PROMPT_SERVICE_GET_ENDPOINT=${PROMPT_SERVICE_GET_ENDPOINT} - APP_PROMPT_SERVICE_CREATE_ENDPOINT=${PROMPT_SERVICE_CREATE_ENDPOINT} + - APP_PROMPT_SERVICE_DELETE_ENDPOINT=${PROMPT_SERVICE_DELETE_ENDPOINT} - APP_KEYCLOAK_SERVICE_ENDPOINT=${KEYCLOAK_SERVICE_ENDPOINT} - LOGFLAG=${LOGFLAG} ipc: host restart: always + + llm-docsum-tgi: + image: ${REGISTRY:-opea}/llm-docsum:${TAG:-latest} + container_name: docsum-xeon-llm-server + depends_on: + tgi-service: + condition: service_healthy + ports: + - ${LLM_PORT_DOCSUM:-9003}:9000 + ipc: host + environment: + no_proxy: ${no_proxy} + http_proxy: ${http_proxy} + https_proxy: ${https_proxy} + LLM_ENDPOINT: http://tgi-service:80 + LLM_MODEL_ID: ${LLM_MODEL_ID} + HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN} + MAX_INPUT_TOKENS: ${MAX_INPUT_TOKENS:-1024} + MAX_TOTAL_TOKENS: ${MAX_TOTAL_TOKENS:-2048} + DocSum_COMPONENT_NAME: ${DocSum_COMPONENT_NAME} + LOGFLAG: ${LOGFLAG:-False} + restart: unless-stopped + docsum-xeon-backend-server: + image: ${REGISTRY:-opea}/docsum:${TAG:-latest} + container_name: docsum-xeon-backend-server + depends_on: + - tgi-service + - llm-docsum-tgi + ports: + - "${BACKEND_SERVICE_PORT_DOCSUM:-8890}:8888" + environment: + - no_proxy=${no_proxy} + - https_proxy=${https_proxy} + - http_proxy=${http_proxy} + - MEGA_SERVICE_HOST_IP=${MEGA_SERVICE_HOST_IP} + - LLM_SERVICE_HOST_IP=llm-docsum-tgi + - LLM_SERVICE_PORT=9000 + - ASR_SERVICE_HOST_IP=whisper + - OTEL_SDK_DISABLED=true + - OTEL_TRACES_EXPORTER=none + ipc: host + restart: always + whisper: + image: ${REGISTRY:-opea}/whisper:${TAG:-latest} + container_name: whisper-server + ports: + - "7066:7066" + ipc: host + environment: + no_proxy: ${no_proxy} + http_proxy: ${http_proxy} + https_proxy: ${https_proxy} + restart: unless-stopped networks: default: driver: bridge diff --git a/ProductivitySuite/docker_compose/intel/cpu/xeon/set_env.sh b/ProductivitySuite/docker_compose/intel/cpu/xeon/set_env.sh old mode 100644 new mode 100755 index 0d0b8d6059..04d53e3639 --- a/ProductivitySuite/docker_compose/intel/cpu/xeon/set_env.sh +++ b/ProductivitySuite/docker_compose/intel/cpu/xeon/set_env.sh @@ -4,47 +4,28 @@ pushd "../../../../../" > /dev/null source .set_env.sh popd > /dev/null -export MONGO_HOST=${host_ip} -export MONGO_PORT=27017 export DB_NAME="opea" -export COLLECTION_NAME="Conversations" export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5" export RERANK_MODEL_ID="BAAI/bge-reranker-base" export LLM_MODEL_ID="Intel/neural-chat-7b-v3-3" -export LLM_MODEL_ID_CODEGEN="meta-llama/CodeLlama-7b-hf" -export TEI_EMBEDDING_ENDPOINT="http://${host_ip}:6006" -export TEI_RERANKING_ENDPOINT="http://${host_ip}:8808" -export TGI_LLM_ENDPOINT="http://${host_ip}:9009" -export REDIS_URL="redis://${host_ip}:6379" +export LLM_MODEL_ID_CODEGEN="Intel/neural-chat-7b-v3-3" export INDEX_NAME="rag-redis" export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN} -export MEGA_SERVICE_HOST_IP=${host_ip} -export EMBEDDING_SERVICE_HOST_IP=${host_ip} -export RETRIEVER_SERVICE_HOST_IP=${host_ip} -export RERANK_SERVICE_HOST_IP=${host_ip} -export LLM_SERVICE_HOST_IP=${host_ip} -export LLM_SERVICE_HOST_IP_DOCSUM=${host_ip} -export LLM_SERVICE_HOST_IP_FAQGEN=${host_ip} -export LLM_SERVICE_HOST_IP_CODEGEN=${host_ip} -export 
LLM_SERVICE_HOST_IP_CHATQNA=${host_ip} -export TGI_LLM_ENDPOINT_CHATQNA="http://${host_ip}:9009" -export TGI_LLM_ENDPOINT_CODEGEN="http://${host_ip}:8028" -export TGI_LLM_ENDPOINT_FAQGEN="http://${host_ip}:9009" -export TGI_LLM_ENDPOINT_DOCSUM="http://${host_ip}:9009" export BACKEND_SERVICE_ENDPOINT_CHATQNA="http://${host_ip}:8888/v1/chatqna" -export DATAPREP_DELETE_FILE_ENDPOINT="http://${host_ip}:5000/v1/dataprep/delete" +export DATAPREP_DELETE_FILE_ENDPOINT="http://${host_ip}:6007/v1/dataprep/delete" export BACKEND_SERVICE_ENDPOINT_CODEGEN="http://${host_ip}:7778/v1/codegen" export BACKEND_SERVICE_ENDPOINT_DOCSUM="http://${host_ip}:8890/v1/docsum" -export DATAPREP_SERVICE_ENDPOINT="http://${host_ip}:5000/v1/dataprep/ingest" -export DATAPREP_GET_FILE_ENDPOINT="http://${host_ip}:5000/v1/dataprep/get" +export DATAPREP_SERVICE_ENDPOINT="http://${host_ip}:6007/v1/dataprep/ingest" +export DATAPREP_GET_FILE_ENDPOINT="http://${host_ip}:6007/v1/dataprep/get" export CHAT_HISTORY_CREATE_ENDPOINT="http://${host_ip}:6012/v1/chathistory/create" export CHAT_HISTORY_CREATE_ENDPOINT="http://${host_ip}:6012/v1/chathistory/create" export CHAT_HISTORY_DELETE_ENDPOINT="http://${host_ip}:6012/v1/chathistory/delete" export CHAT_HISTORY_GET_ENDPOINT="http://${host_ip}:6012/v1/chathistory/get" export PROMPT_SERVICE_GET_ENDPOINT="http://${host_ip}:6018/v1/prompt/get" export PROMPT_SERVICE_CREATE_ENDPOINT="http://${host_ip}:6018/v1/prompt/create" +export PROMPT_SERVICE_DELETE_ENDPOINT="http://${host_ip}:6018/v1/prompt/delete" export KEYCLOAK_SERVICE_ENDPOINT="http://${host_ip}:8080" -export LLM_SERVICE_HOST_PORT_FAQGEN=9002 -export LLM_SERVICE_HOST_PORT_CODEGEN=9001 -export LLM_SERVICE_HOST_PORT_DOCSUM=9003 -export PROMPT_COLLECTION_NAME="prompt" +export DocSum_COMPONENT_NAME="OpeaDocSumTgi" + +#Set no proxy +export no_proxy="$no_proxy,tgi_service_codegen,llm_codegen,tei-embedding-service,tei-reranking-service,chatqna-xeon-backend-server,retriever,tgi-service,redis-vector-db,whisper,llm-docsum-tgi,docsum-xeon-backend-server,mongo,codegen" diff --git a/ProductivitySuite/docker_image_build/build.yaml b/ProductivitySuite/docker_image_build/build.yaml index 34a3296c12..0401d6cfd0 100644 --- a/ProductivitySuite/docker_image_build/build.yaml +++ b/ProductivitySuite/docker_image_build/build.yaml @@ -77,3 +77,27 @@ services: context: GenAIComps dockerfile: comps/llms/src/faq-generation/Dockerfile image: ${REGISTRY:-opea}/llm-faqgen:${TAG:-latest} + docsum: + build: + args: + http_proxy: ${http_proxy} + https_proxy: ${https_proxy} + no_proxy: ${no_proxy} + context: ../../DocSum/ + dockerfile: ./Dockerfile + image: ${REGISTRY:-opea}/docsum:${TAG:-latest} + llm-docsum: + build: + context: GenAIComps + dockerfile: comps/llms/src/doc-summarization/Dockerfile + extends: docsum + image: ${REGISTRY:-opea}/llm-docsum:${TAG:-latest} + whisper: + build: + args: + http_proxy: ${http_proxy} + https_proxy: ${https_proxy} + context: GenAIComps + dockerfile: comps/third_parties/whisper/src/Dockerfile + extends: docsum + image: ${REGISTRY:-opea}/whisper:${TAG:-latest} diff --git a/ProductivitySuite/tests/test_compose_on_xeon.sh b/ProductivitySuite/tests/test_compose_on_xeon.sh index 7a5ba4be4f..34d9a96691 100755 --- a/ProductivitySuite/tests/test_compose_on_xeon.sh +++ b/ProductivitySuite/tests/test_compose_on_xeon.sh @@ -30,58 +30,31 @@ function build_docker_images() { function start_services() { cd $WORKPATH/docker_compose/intel/cpu/xeon/ + export DB_NAME="opea" export EMBEDDING_MODEL_ID="BAAI/bge-base-en-v1.5" - export 
RERANK_TYPE="tei" export RERANK_MODEL_ID="BAAI/bge-reranker-base" export LLM_MODEL_ID="Intel/neural-chat-7b-v3-3" export LLM_MODEL_ID_CODEGEN="Intel/neural-chat-7b-v3-3" - export DATAPREP_TYPE="redis" - export RETRIEVER_TYPE="redis" - export TEI_EMBEDDING_ENDPOINT="http://${ip_address}:6006" - export TEI_RERANKING_ENDPOINT="http://${ip_address}:8808" - export TGI_LLM_ENDPOINT="http://${ip_address}:9009" - export REDIS_URL="redis://${ip_address}:6379" - export REDIS_HOST=${ip_address} export INDEX_NAME="rag-redis" export HUGGINGFACEHUB_API_TOKEN=${HUGGINGFACEHUB_API_TOKEN} - export MEGA_SERVICE_HOST_IP=${ip_address} - export EMBEDDING_SERVICE_HOST_IP=${ip_address} - export RETRIEVER_SERVICE_HOST_IP=${ip_address} - export RERANK_SERVICE_HOST_IP=${ip_address} - export LLM_SERVICE_HOST_IP=${ip_address} - export LLM_SERVICE_HOST_IP_DOCSUM=${ip_address} - export LLM_SERVICE_HOST_IP_FAQGEN=${ip_address} - export LLM_SERVICE_HOST_IP_CODEGEN=${ip_address} - export LLM_SERVICE_HOST_IP_CHATQNA=${ip_address} - export TGI_LLM_ENDPOINT_CHATQNA="http://${ip_address}:9009" - export TGI_LLM_ENDPOINT_CODEGEN="http://${ip_address}:8028" - export TGI_LLM_ENDPOINT_FAQGEN="http://${ip_address}:9009" - export TGI_LLM_ENDPOINT_DOCSUM="http://${ip_address}:9009" export BACKEND_SERVICE_ENDPOINT_CHATQNA="http://${ip_address}:8888/v1/chatqna" - export DATAPREP_DELETE_FILE_ENDPOINT="http://${ip_address}:5000/v1/dataprep/delete" + export DATAPREP_DELETE_FILE_ENDPOINT="http://${ip_address}:6007/v1/dataprep/delete" export BACKEND_SERVICE_ENDPOINT_CODEGEN="http://${ip_address}:7778/v1/codegen" - export DATAPREP_SERVICE_ENDPOINT="http://${ip_address}:5000/v1/dataprep/ingest" - export DATAPREP_GET_FILE_ENDPOINT="http://${ip_address}:5000/v1/dataprep/get" + export BACKEND_SERVICE_ENDPOINT_DOCSUM="http://${ip_address}:8890/v1/docsum" + export DATAPREP_SERVICE_ENDPOINT="http://${ip_address}:6007/v1/dataprep/ingest" + export DATAPREP_GET_FILE_ENDPOINT="http://${ip_address}:6007/v1/dataprep/get" export CHAT_HISTORY_CREATE_ENDPOINT="http://${ip_address}:6012/v1/chathistory/create" export CHAT_HISTORY_CREATE_ENDPOINT="http://${ip_address}:6012/v1/chathistory/create" export CHAT_HISTORY_DELETE_ENDPOINT="http://${ip_address}:6012/v1/chathistory/delete" export CHAT_HISTORY_GET_ENDPOINT="http://${ip_address}:6012/v1/chathistory/get" export PROMPT_SERVICE_GET_ENDPOINT="http://${ip_address}:6018/v1/prompt/get" export PROMPT_SERVICE_CREATE_ENDPOINT="http://${ip_address}:6018/v1/prompt/create" + export PROMPT_SERVICE_DELETE_ENDPOINT="http://${ip_address}:6018/v1/prompt/delete" export KEYCLOAK_SERVICE_ENDPOINT="http://${ip_address}:8080" - export MONGO_HOST=${ip_address} - export MONGO_PORT=27017 - export DB_NAME="opea" - export COLLECTION_NAME="Conversations" - export LLM_SERVICE_HOST_PORT_FAQGEN=9002 - export LLM_SERVICE_HOST_PORT_CODEGEN=9001 - export RERANK_SERVER_PORT=8808 - export EMBEDDING_SERVER_PORT=6006 - export LLM_SERVER_PORT=9009 - export PROMPT_COLLECTION_NAME="prompt" + export DocSum_COMPONENT_NAME="OpeaDocSumTgi" export host_ip=${ip_address} - export FAQGen_COMPONENT_NAME="OpeaFaqGenTgi" export LOGFLAG=True + export no_proxy="$no_proxy,tgi_service_codegen,llm_codegen,tei-embedding-service,tei-reranking-service,chatqna-xeon-backend-server,retriever,tgi-service,redis-vector-db,whisper,llm-docsum-tgi,docsum-xeon-backend-server,mongo,codegen" # Start Docker Containers docker compose up -d > ${LOG_PATH}/start_services_with_compose.log @@ -141,34 +114,6 @@ function validate_service() { sleep 1s } -function validate_faqgen() { - 
local URL="$1" - local EXPECTED_RESULT="$2" - local SERVICE_NAME="$3" - local DOCKER_NAME="$4" - local INPUT_DATA="$5" - - local HTTP_STATUS=$(curl -s -o /dev/null -w "%{http_code}" -X POST -d "$INPUT_DATA" -H 'Content-Type: application/json' "$URL") - if [ "$HTTP_STATUS" -eq 200 ]; then - echo "[ $SERVICE_NAME ] HTTP status is 200. Checking content..." - - local CONTENT=$(curl -s -X POST -d "$INPUT_DATA" -H 'Content-Type: application/json' "$URL" | tee ${LOG_PATH}/${SERVICE_NAME}.log) - - if echo "$CONTENT" | grep -q "$EXPECTED_RESULT"; then - echo "[ $SERVICE_NAME ] Content is as expected." - else - echo "[ $SERVICE_NAME ] Content does not match the expected result: $CONTENT" - docker logs ${DOCKER_NAME} >> ${LOG_PATH}/${SERVICE_NAME}.log - exit 1 - fi - else - echo "[ $SERVICE_NAME ] HTTP status is not 200. Received status was $HTTP_STATUS" - docker logs ${DOCKER_NAME} >> ${LOG_PATH}/${SERVICE_NAME}.log - exit 1 - fi - sleep 1s -} - function validate_microservices() { # Check if the microservices are running correctly. @@ -180,14 +125,6 @@ function validate_microservices() { "tei-embedding-server" \ '{"inputs":"What is Deep Learning?"}' - # embedding microservice - validate_service \ - "${ip_address}:6000/v1/embeddings" \ - '"embedding":[' \ - "embedding-microservice" \ - "embedding-server" \ - '{"input":"What is Deep Learning?"}' - sleep 1m # retrieval can't curl as expected, try to wait for more time # test /v1/dataprep/delete @@ -222,7 +159,7 @@ function validate_microservices() { # retrieval microservice test_embedding=$(python3 -c "import random; embedding = [random.uniform(-1, 1) for _ in range(768)]; print(embedding)") validate_service \ - "${ip_address}:7000/v1/retrieval" \ + "${ip_address}:7001/v1/retrieval" \ "retrieved_docs" \ "retrieval-microservice" \ "retriever-redis-server" \ @@ -236,14 +173,6 @@ function validate_microservices() { "tei-reranking-server" \ '{"query":"What is Deep Learning?", "texts": ["Deep Learning is not...", "Deep learning is..."]}' - # rerank microservice - validate_service \ - "${ip_address}:8000/v1/reranking" \ - "Deep learning is..." \ - "rerank-microservice" \ - "reranking-tei-xeon-server" \ - '{"initial_query":"What is Deep Learning?", "retrieved_docs": [{"text":"Deep Learning is not..."}, {"text":"Deep learning is..."}]}' - # tgi for llm service validate_service \ "${ip_address}:9009/generate" \ @@ -252,21 +181,6 @@ function validate_microservices() { "tgi-service" \ '{"inputs":"What is Deep Learning?","parameters":{"max_new_tokens":17, "do_sample": true}}' - # ChatQnA llm microservice - validate_service \ - "${ip_address}:9000/v1/chat/completions" \ - "data: " \ - "llm-microservice" \ - "llm-textgen-server" \ - '{"query":"What is Deep Learning?"}' - - # FAQGen llm microservice - validate_faqgen \ - "${ip_address}:9002/v1/faqgen" \ - "text" \ - "llm_faqgen" \ - "llm-faqgen-server" \ - '{"messages":"Text Embeddings Inference (TEI) is a toolkit for deploying and serving open source text embeddings and sequence classification models. 
TEI enables high-performance extraction for the most popular models, including FlagEmbedding, Ember, GTE and E5."}' # CodeGen llm microservice validate_service \ diff --git a/ProductivitySuite/ui/react/.env.production b/ProductivitySuite/ui/react/.env.production index f881c388ce..a063cc5142 100644 --- a/ProductivitySuite/ui/react/.env.production +++ b/ProductivitySuite/ui/react/.env.production @@ -1,7 +1,6 @@ VITE_BACKEND_SERVICE_ENDPOINT_CHATQNA=APP_BACKEND_SERVICE_ENDPOINT_CHATQNA VITE_BACKEND_SERVICE_ENDPOINT_CODEGEN=APP_BACKEND_SERVICE_ENDPOINT_CODEGEN VITE_BACKEND_SERVICE_ENDPOINT_DOCSUM=APP_BACKEND_SERVICE_ENDPOINT_DOCSUM -VITE_BACKEND_SERVICE_ENDPOINT_FAQGEN=APP_BACKEND_SERVICE_ENDPOINT_FAQGEN VITE_KEYCLOAK_SERVICE_ENDPOINT=APP_KEYCLOAK_SERVICE_ENDPOINT @@ -14,3 +13,4 @@ VITE_CHAT_HISTORY_GET_ENDPOINT=APP_CHAT_HISTORY_GET_ENDPOINT VITE_CHAT_HISTORY_DELETE_ENDPOINT=APP_CHAT_HISTORY_DELETE_ENDPOINT VITE_PROMPT_SERVICE_GET_ENDPOINT=APP_PROMPT_SERVICE_GET_ENDPOINT VITE_PROMPT_SERVICE_CREATE_ENDPOINT=APP_PROMPT_SERVICE_CREATE_ENDPOINT +VITE_PROMPT_SERVICE_DELETE_ENDPOINT=APP_PROMPT_SERVICE_DELETE_ENDPOINT diff --git a/ProductivitySuite/ui/react/README.md b/ProductivitySuite/ui/react/README.md index c7b6d0b931..469a4c70fe 100644 --- a/ProductivitySuite/ui/react/README.md +++ b/ProductivitySuite/ui/react/README.md @@ -41,24 +41,13 @@ Here're some of the project's features: ### DOC SUMMARY -- Summarizing Uploaded Files: Upload files from their local device, then click 'Generate Summary' to summarize the content of the uploaded file. The summary will be displayed on the 'Summary' box. -- Summarizing Text via Pasting: Paste the text to be summarized into the text box, then click 'Generate Summary' to produce a condensed summary of the content, which will be displayed in the 'Summary' box on the right. +- Summarizing Uploaded Files: Upload files from your local device, then click the up-arrow button to summarize the content of the uploaded file. +- Summarizing Text via Pasting: Paste the text to be summarized into the text box, then click the up-arrow button to produce a condensed summary of the content, which will be displayed on a new page. - Scroll to Bottom: The summarized content will automatically scroll to the bottom. #### Screen Shot -![project-screenshot](../../assets/img/doc_summary_paste.png) -![project-screenshot](../../assets/img/doc_summary_file.png) - -### FAQ Generator - -- Generate FAQs from Text via Pasting: Paste the text to into the text box, then click 'Generate FAQ' to produce a condensed FAQ of the content, which will be displayed in the 'FAQ' box below. - -- Generate FAQs from Text via txt file Upload: Upload the file in the Upload bar, then click 'Generate FAQ' to produce a condensed FAQ of the content, which will be displayed in the 'FAQ' box below. - -#### Screen Shot - -![project-screenshot](../../assets/img/faq_generator.png) +![project-screenshot](../../assets/img/doc_summary.png) ## 🛠️ Get it Running @@ -68,22 +57,43 @@ 3. create a .env file and add the following variables and values.
``` - VITE_BACKEND_SERVICE_ENDPOINT_CHATQNA='' - VITE_BACKEND_SERVICE_ENDPOINT_CODEGEN='' - VITE_BACKEND_SERVICE_ENDPOINT_DOCSUM='' - VITE_BACKEND_SERVICE_ENDPOINT_FAQGEN='' - VITE_KEYCLOAK_SERVICE_ENDPOINT='' - VITE_DATAPREP_SERVICE_ENDPOINT='' - VITE_DATAPREP_GET_FILE_ENDPOINT='' - VITE_DATAPREP_DELETE_FILE_ENDPOINT='' - VITE_CHAT_HISTORY_CREATE_ENDPOINT='' - VITE_CHAT_HISTORY_GET_ENDPOINT='' - VITE_CHAT_HISTORY_DELETE_ENDPOINT='' - VITE_PROMPT_SERVICE_GET_ENDPOINT='' - VITE_PROMPT_SERVICE_CREATE_ENDPOINT='' + VITE_BACKEND_SERVICE_ENDPOINT_CHATQNA="" + VITE_DATAPREP_DELETE_FILE_ENDPOINT="" + VITE_BACKEND_SERVICE_ENDPOINT_CODEGEN="" + VITE_BACKEND_SERVICE_ENDPOINT_DOCSUM="" + VITE_DATAPREP_SERVICE_ENDPOINT="" + VITE_DATAPREP_GET_FILE_ENDPOINT="" + VITE_CHAT_HISTORY_CREATE_ENDPOINT="" + VITE_CHAT_HISTORY_DELETE_ENDPOINT="" + VITE_CHAT_HISTORY_GET_ENDPOINT="" + VITE_PROMPT_SERVICE_GET_ENDPOINT="" + VITE_PROMPT_SERVICE_CREATE_ENDPOINT="" + VITE_PROMPT_SERVICE_DELETE_ENDPOINT="" + VITE_KEYCLOAK_SERVICE_ENDPOINT="" + VITE_PROMPT_COLLECTION_NAME="prompt" + ``` +4. There is a model_configs.json file under the public folder, in the format shown below. The types key in the JSON array lists which features (chat, summary, code) each model is used for. + ``` -4. Execute `npm install` to install the corresponding dependencies. + [ + + { + "model_name": "Intel/neural-chat-7b-v3-3", + "displayName": "Intel Neural Chat", + "minToken": 100, + "maxToken": 2000, + "types": [ + "chat", + "summary", + "code" + ] + } + + ] + ``` + +5. Execute `npm install` to install the corresponding dependencies. -5. Execute `npm run dev` +6. Execute `npm run dev` -6. open http://localhost:5174 in browser to the see the UI +7. Open http://localhost:5174 in a browser to see the UI diff --git a/ProductivitySuite/ui/react/index.html b/ProductivitySuite/ui/react/index.html index fbe87e0fd5..b048d29d77 100644 --- a/ProductivitySuite/ui/react/index.html +++ b/ProductivitySuite/ui/react/index.html @@ -1,18 +1,29 @@ - - - - Conversations UI + + + + + + + + + + + ProductivitySuite UI +
- + diff --git a/ProductivitySuite/ui/react/package.json b/ProductivitySuite/ui/react/package.json index 57e384bfad..7b2fc4f007 100644 --- a/ProductivitySuite/ui/react/package.json +++ b/ProductivitySuite/ui/react/package.json @@ -1,56 +1,92 @@ { - "name": "ui", + "name": "ProductivitySuite", + "version": "0.0.1", + "description": "ProductivitySuite UI - OPEA", + "homepage": ".", "private": true, - "version": "0.0.0", "type": "module", + "engines": { + "node": "20.x" + }, "scripts": { - "dev": "vite", - "build": "tsc && vite build", - "lint": "eslint . --ext ts,tsx --report-unused-disable-directives --max-warnings 0", - "preview": "vite preview", + "dev": "vite --port 5173", + "build": "vite build", + "preview": "vite preview --port 5173", + "prettier:write": "prettier --write .", "test": "vitest run" }, + "eslintConfig": { + "extends": [ + "react-app", + "react-app/jest" + ] + }, + "browserslist": { + "production": [ + ">0.2%", + "not dead", + "not op_mini all" + ], + "development": [ + "last 1 chrome version", + "last 1 firefox version", + "last 1 safari version" + ] + }, "dependencies": { - "@mantine/core": "^7.11.1", - "@mantine/dropzone": "^7.11.1", - "@mantine/hooks": "^7.11.1", - "@mantine/notifications": "^7.10.2", "@microsoft/fetch-event-source": "^2.0.1", + "@mui/icons-material": "^6.4.1", + "@mui/material": "^6.4.1", + "@mui/styled-engine-sc": "^6.4.0", "@react-keycloak/web": "^3.4.0", - "@reduxjs/toolkit": "^2.2.5", - "@tabler/icons-react": "3.27.1", - "axios": "^1.7.2", + "@reduxjs/toolkit": "^2.5.0", + "axios": "^1.7.9", "keycloak-js": "^25.0.2", - "luxon": "^3.4.4", + "notistack": "^3.0.2", "react": "^18.2.0", "react-dom": "^18.2.0", - "react-markdown": "^9.0.1", - "react-redux": "^9.1.2", - "react-router-dom": "^6.25.1", - "react-syntax-highlighter": "^15.5.0", + "react-markdown": "^8.0.7", + "react-redux": "^9.2.0", + "react-router-dom": "^7.1.1", + "react-syntax-highlighter": "^15.6.1", + "remark-breaks": "^4.0.0", "remark-frontmatter": "^5.0.0", - "remark-gfm": "^4.0.0" + "remark-gfm": "^3.0.1", + "styled-components": "^6.1.14" }, "devDependencies": { - "@testing-library/react": "^16.0.0", - "@types/luxon": "^3.4.2", - "@types/node": "^20.12.12", - "@types/react": "^18.2.66", - "@types/react-dom": "^18.2.22", + "@auth/express": "^0.8.4", + "@babel/plugin-transform-private-property-in-object": "^7.25.9", + "@electron-toolkit/preload": "^3.0.1", + "@mui/types": "^7.2.21", + "@rollup/plugin-terser": "^0.4.4", + "@testing-library/jest-dom": "^5.16.5", + "@testing-library/react": "^13.4.0", + "@testing-library/user-event": "^14.4.3", + "@types/electron-devtools-installer": "^2.2.2", + "@types/jest": "^29.4.0", + "@types/node": "^18.13.0", + "@types/react": "^19.0.2", + "@types/react-dom": "^19.0.2", "@types/react-syntax-highlighter": "^15.5.13", - "@typescript-eslint/eslint-plugin": "^7.2.0", - "@typescript-eslint/parser": "^7.2.0", - "@vitejs/plugin-react": "^4.2.1", - "eslint": "^8.57.0", - "eslint-plugin-react-hooks": "^4.6.0", - "eslint-plugin-react-refresh": "^0.4.6", - "jsdom": "^24.1.0", - "postcss": "^8.4.38", - "postcss-preset-mantine": "^1.15.0", - "postcss-simple-vars": "^7.0.1", - "sass": "1.64.2", - "typescript": "^5.2.2", - "vite": "^5.2.13", - "vitest": "^1.6.0" + "@vitejs/plugin-react": "^4.3.4", + "concurrently": "^7.6.0", + "cors": "^2.8.5", + "cross-env": "^7.0.3", + "dotenv": "^16.4.7", + "express": "^4.21.2", + "nodemon": "^3.1.9", + "prettier": "^3.5.3", + "rollup-plugin-visualizer": "^5.14.0", + "sass": "^1.83.1", + "typescript": "^5.7.3", + "vite": 
"^5.3.1", + "vite-plugin-compression": "^0.5.1", + "vite-plugin-mkcert": "^1.17.6", + "vite-plugin-sass-dts": "^1.3.30", + "vite-plugin-svgr": "^4.3.0", + "vitest": "^3.1.2", + "wait-on": "^7.0.1", + "webpack-bundle-analyzer": "^4.10.2" } } diff --git a/ProductivitySuite/ui/react/postcss.config.cjs b/ProductivitySuite/ui/react/postcss.config.cjs deleted file mode 100644 index e817f567be..0000000000 --- a/ProductivitySuite/ui/react/postcss.config.cjs +++ /dev/null @@ -1,14 +0,0 @@ -module.exports = { - plugins: { - "postcss-preset-mantine": {}, - "postcss-simple-vars": { - variables: { - "mantine-breakpoint-xs": "36em", - "mantine-breakpoint-sm": "48em", - "mantine-breakpoint-md": "62em", - "mantine-breakpoint-lg": "75em", - "mantine-breakpoint-xl": "88em", - }, - }, - }, -}; diff --git a/ProductivitySuite/ui/react/public/favicon.ico b/ProductivitySuite/ui/react/public/favicon.ico new file mode 100644 index 0000000000..c2c86b859e Binary files /dev/null and b/ProductivitySuite/ui/react/public/favicon.ico differ diff --git a/ProductivitySuite/ui/react/public/logo192.png b/ProductivitySuite/ui/react/public/logo192.png new file mode 100644 index 0000000000..fa313abf53 Binary files /dev/null and b/ProductivitySuite/ui/react/public/logo192.png differ diff --git a/ProductivitySuite/ui/react/public/logo512.png b/ProductivitySuite/ui/react/public/logo512.png new file mode 100644 index 0000000000..bd5d4b5e23 Binary files /dev/null and b/ProductivitySuite/ui/react/public/logo512.png differ diff --git a/ProductivitySuite/ui/react/public/manifest.json b/ProductivitySuite/ui/react/public/manifest.json new file mode 100644 index 0000000000..a3295c9a2a --- /dev/null +++ b/ProductivitySuite/ui/react/public/manifest.json @@ -0,0 +1,25 @@ +{ + "short_name": "ProductivitySuite", + "name": "ProductivitySuite UI", + "icons": [ + { + "src": "favicon.ico", + "sizes": "64x64 32x32 24x24 16x16", + "type": "image/x-icon" + }, + { + "src": "logo192.png", + "type": "image/png", + "sizes": "192x192" + }, + { + "src": "logo512.png", + "type": "image/png", + "sizes": "512x512" + } + ], + "start_url": ".", + "display": "standalone", + "theme_color": "#000000", + "background_color": "#ffffff" +} diff --git a/ProductivitySuite/ui/react/public/model_configs.json b/ProductivitySuite/ui/react/public/model_configs.json new file mode 100644 index 0000000000..cea98dc7d3 --- /dev/null +++ b/ProductivitySuite/ui/react/public/model_configs.json @@ -0,0 +1,9 @@ +[ + { + "model_name": "Intel/neural-chat-7b-v3-3", + "displayName": "Intel Neural Chat", + "minToken": 100, + "maxToken": 2000, + "types": ["chat", "summary", "code"] + } +] diff --git a/ProductivitySuite/ui/react/public/robots.txt b/ProductivitySuite/ui/react/public/robots.txt new file mode 100644 index 0000000000..01b0f9a107 --- /dev/null +++ b/ProductivitySuite/ui/react/public/robots.txt @@ -0,0 +1,2 @@ +# https://www.robotstxt.org/robotstxt.html +User-agent: * diff --git a/ProductivitySuite/ui/react/src/App.scss b/ProductivitySuite/ui/react/src/App.scss index 187764a179..1317587986 100644 --- a/ProductivitySuite/ui/react/src/App.scss +++ b/ProductivitySuite/ui/react/src/App.scss @@ -1,42 +1 @@ -// Copyright (C) 2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 - -@import "./styles/styles"; - -.root { - @include flex(row, nowrap, flex-start, flex-start); -} - -.layout-wrapper { - @include absolutes; - - display: grid; - - width: 100%; - height: 100%; - - grid-template-columns: 80px auto; - grid-template-rows: 1fr; -} - -/* ===== Scrollbar CSS ===== */ -/* 
Firefox */ -* { - scrollbar-width: thin; - scrollbar-color: #d6d6d6 #ffffff; -} - -/* Chrome, Edge, and Safari */ -*::-webkit-scrollbar { - width: 8px; -} - -*::-webkit-scrollbar-track { - background: #ffffff; -} - -*::-webkit-scrollbar-thumb { - background-color: #d6d6d6; - border-radius: 16px; - border: 4px double #dedede; -} +// Post javascript styles diff --git a/ProductivitySuite/ui/react/src/App.tsx b/ProductivitySuite/ui/react/src/App.tsx index c12ee1d8fa..970cbc58c6 100644 --- a/ProductivitySuite/ui/react/src/App.tsx +++ b/ProductivitySuite/ui/react/src/App.tsx @@ -1,68 +1,195 @@ -// Copyright (C) 2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 - -import "./App.scss" -import {MantineProvider } from "@mantine/core" -import '@mantine/notifications/styles.css'; -import { SideNavbar, SidebarNavList } from "./components/sidebar/sidebar" -import { IconMessages, IconFileTextAi, IconCode, IconFileInfo, IconDatabaseCog } from "@tabler/icons-react" -import Conversation from "./components/Conversation/Conversation" -import { Notifications } from '@mantine/notifications'; -import { BrowserRouter, Route, Routes } from "react-router-dom"; -import CodeGen from "./components/CodeGen/CodeGen"; -import DocSum from "./components/DocSum/DocSum"; -import FaqGen from "./components/FaqGen/FaqGen"; +import "./App.scss"; + +import React, { Suspense, useEffect } from "react"; +import { BrowserRouter, Routes, Route } from "react-router-dom"; +import ProtectedRoute from "@layouts/ProtectedRoute/ProtectedRoute"; + import { useKeycloak } from "@react-keycloak/web"; -import DataSource from "./components/Conversation/DataSource"; -import { useAppDispatch } from "./redux/store"; -import { setUser } from "./redux/User/userSlice"; -import { useEffect } from "react"; - -const title = "Chat QnA" -const navList: SidebarNavList = [ - { icon: IconMessages, label: "Chat Qna", path: "/", children: }, - { icon: IconCode, label: "CodeGen", path: "/codegen", children: }, - { icon: IconFileTextAi, label: "DocSum", path: "/docsum", children: }, - { icon: IconFileInfo, label: "FaqGen", path: "/faqgen", children: }, - { icon: IconDatabaseCog, label: "Data Management", path: "/data-management", children: } -] - -function App() { +import { setUser, userSelector } from "@redux/User/userSlice"; + +import MainLayout from "@layouts/Main/MainLayout"; +import MinimalLayout from "@layouts/Minimal/MinimalLayout"; +import Notification from "@components/Notification/Notification"; +import { Box, styled, Typography } from "@mui/material"; +import { AtomAnimation, AtomIcon } from "@icons/Atom"; + +import { useAppDispatch, useAppSelector } from "@redux/store"; +import { + conversationSelector, + getAllConversations, + /*getStoredPromptSettings,*/ getSupportedModels, + getSupportedUseCases, +} from "@redux/Conversation/ConversationSlice"; +import { getPrompts } from "@redux/Prompt/PromptSlice"; + +import Home from "@pages/Home/Home"; +import ChatView from "@pages/Chat/ChatView"; + +const HistoryView = React.lazy(() => import("@pages/History/HistoryView")); +const DataSourceManagement = React.lazy( + () => import("@pages/DataSource/DataSourceManagement"), +); + +const LoadingBox = styled(Box)({ + display: "flex", + flexDirection: "column", + justifyContent: "center", + alignItems: "center", + height: "100vh", + width: "100vw", +}); + +const App = () => { const { keycloak } = useKeycloak(); - const dispatch = useAppDispatch() - useEffect(()=>{ - dispatch(setUser(keycloak?.idTokenParsed?.preferred_username)) - 
},[keycloak.idTokenParsed]) - - return ( - <> - - {!keycloak.authenticated ? ( - "redirecting to sso ..." - ) : ( - - - -
- -
- - {navList.map(tab => { - return () - })} - - - - -
-
-
- )} -
- - - ) - -} - -export default App + const dispatch = useAppDispatch(); + + const { name } = useAppSelector(userSelector); + const { useCase } = useAppSelector(conversationSelector); + + useEffect(() => { + //TODO: get role from keyCloack scope, defaulting to Admin + dispatch( + setUser({ + name: keycloak?.idTokenParsed?.preferred_username, + isAuthenticated: true, + role: "Admin", + }), + ); + }, [keycloak.idTokenParsed]); + + const initSettings = () => { + if (keycloak.authenticated) { + dispatch(getSupportedUseCases()); + dispatch(getSupportedModels()); + dispatch(getPrompts()); + } + }; + + useEffect(() => { + if (keycloak.authenticated) initSettings(); + }, [keycloak.authenticated]); + + //TODO: on potential useCase change get different conversation data + useEffect(() => { + if (keycloak.authenticated && useCase) { + dispatch(getAllConversations({ user: name, useCase: useCase })); + // dispatch(getSharedConversations({ usecase: selectedUseCase.use_case })); + } + }, [useCase, name]); + + return !keycloak.authenticated ? ( + + + redirecting to sso ... + + ) : ( + + + + + } + > + + {/* Routes wrapped in MainLayout */} + }> + + } + /> + + + }> + + } + /> + + + }> + ( + + )} + /> + } + /> + ( + + )} + /> + } + /> + + + }> + + } + /> + + } + /> + + } + /> + + } + /> + + + {/* Routes not wrapped in MainLayout */} + }> + {/* } /> */} + + + + + + ); +}; + +export default App; diff --git a/ProductivitySuite/ui/react/src/assets/icons/moon.svg b/ProductivitySuite/ui/react/src/assets/icons/moon.svg new file mode 100644 index 0000000000..a9f36a8321 --- /dev/null +++ b/ProductivitySuite/ui/react/src/assets/icons/moon.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/ProductivitySuite/ui/react/src/assets/icons/sun.svg b/ProductivitySuite/ui/react/src/assets/icons/sun.svg new file mode 100644 index 0000000000..510dad63bf --- /dev/null +++ b/ProductivitySuite/ui/react/src/assets/icons/sun.svg @@ -0,0 +1,4 @@ + + + + \ No newline at end of file diff --git a/ProductivitySuite/ui/react/src/assets/opea-icon-black.svg b/ProductivitySuite/ui/react/src/assets/opea-icon-black.svg deleted file mode 100644 index 5c96dc7622..0000000000 --- a/ProductivitySuite/ui/react/src/assets/opea-icon-black.svg +++ /dev/null @@ -1,39 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/ProductivitySuite/ui/react/src/assets/opea-icon-color.svg b/ProductivitySuite/ui/react/src/assets/opea-icon-color.svg deleted file mode 100644 index 790151171e..0000000000 --- a/ProductivitySuite/ui/react/src/assets/opea-icon-color.svg +++ /dev/null @@ -1,40 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/ProductivitySuite/ui/react/src/assets/react.svg b/ProductivitySuite/ui/react/src/assets/react.svg deleted file mode 100644 index 8e0e0f15c0..0000000000 --- a/ProductivitySuite/ui/react/src/assets/react.svg +++ /dev/null @@ -1 +0,0 @@ - diff --git a/ProductivitySuite/ui/react/src/common/client.ts b/ProductivitySuite/ui/react/src/common/client.ts deleted file mode 100644 index 7512f73e33..0000000000 --- a/ProductivitySuite/ui/react/src/common/client.ts +++ /dev/null @@ -1,8 +0,0 @@ -// Copyright (C) 2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 - -import axios from "axios"; - -//add iterceptors to add any request headers - -export default axios; diff --git a/ProductivitySuite/ui/react/src/components/Chat_Assistant/ChatAssistant.module.scss b/ProductivitySuite/ui/react/src/components/Chat_Assistant/ChatAssistant.module.scss new file mode 100644 index 0000000000..ac8428e853 --- 
/dev/null +++ b/ProductivitySuite/ui/react/src/components/Chat_Assistant/ChatAssistant.module.scss @@ -0,0 +1,68 @@ +.chatReply { + display: flex; + flex-direction: row; + + .icon { + padding-right: 1rem; + + svg { + width: 24px; + height: 24px; + } + } +} + +.ellipsis { + position: relative; + + span { + position: relative; + animation: dance 1.5s infinite ease-in-out; + } + + span:nth-child(1) { + margin-left: 2px; + animation-delay: 0s; + } + + span:nth-child(2) { + animation-delay: 0.3s; + } + + span:nth-child(3) { + animation-delay: 0.6s; + } +} + +@keyframes dance { + 0%, + 100% { + bottom: 0; + opacity: 1; + } + 20% { + bottom: 5px; + opacity: 0.7; + } + 40% { + bottom: 0; + opacity: 1; + } +} + +.textedit { + width: 100%; + min-height: 50px; + padding: 1rem; +} + +.chatPrompt { + width: 100%; + overflow-wrap: break-word; + word-wrap: break-word; + word-break: break-word; + + p:first-of-type { + margin-top: 0; + } +} diff --git a/ProductivitySuite/ui/react/src/components/Chat_Assistant/ChatAssistant.tsx b/ProductivitySuite/ui/react/src/components/Chat_Assistant/ChatAssistant.tsx new file mode 100644 index 0000000000..9194a5ad1d --- /dev/null +++ b/ProductivitySuite/ui/react/src/components/Chat_Assistant/ChatAssistant.tsx @@ -0,0 +1,227 @@ +import React, { useEffect, useRef, useState } from "react"; + +import styles from "./ChatAssistant.module.scss"; +import { + Button, + Typography, + IconButton, + Box, + styled, + Tooltip, +} from "@mui/material"; +import { AtomIcon } from "@icons/Atom"; +import ThumbUpIcon from "@mui/icons-material/ThumbUp"; +import ThumbUpOutlinedIcon from "@mui/icons-material/ThumbUpOutlined"; +import ThumbDownIcon from "@mui/icons-material/ThumbDown"; +import ThumbDownOutlinedIcon from "@mui/icons-material/ThumbDownOutlined"; +import ContentCopyIcon from "@mui/icons-material/ContentCopy"; +import EditNoteIcon from "@mui/icons-material/EditNote"; +import ChatSettingsModal from "@components/Chat_SettingsModal/ChatSettingsModal"; + +import { + NotificationSeverity, + notify, +} from "@components/Notification/Notification"; +import { ChatMessageProps, Message } from "@redux/Conversation/Conversation"; +import ChatMarkdown from "@components/Chat_Markdown/ChatMarkdown"; +import { useAppDispatch, useAppSelector } from "@redux/store"; +import { + conversationSelector, + saveConversationtoDatabase, + setSelectedConversationHistory, +} from "@redux/Conversation/ConversationSlice"; +import WaitingIcon from "@icons/Waiting"; + +const CancelStyle = styled(Button)(({ theme }) => ({ + ...theme.customStyles.actionButtons.delete, +})); + +const SaveStyle = styled(Button)(({ theme }) => ({ + ...theme.customStyles.actionButtons.solid, +})); + +const ChatAssistant: React.FC = ({ + message, + pending = false, +}) => { + const dispatch = useAppDispatch(); + const { + onGoingResult, + selectedConversationHistory, + selectedConversationId, + type, + } = useAppSelector(conversationSelector); + + const [currentMessage, setCurrentMessage] = useState(message); + const [editResponse, setEditResponse] = useState(false); + const responseRef = useRef(currentMessage.content); + const [disabledSave, setDisabledSave] = useState(false); + const [inputHeight, setInputHeight] = useState(0); + const heightCheck = useRef(null); + const isClipboardAvailable = navigator.clipboard && window.isSecureContext; + + useEffect(() => { + setCurrentMessage(message); + }, [message]); + + const assistantMessage = currentMessage.content ?? 
""; + + // const [feedback, setFeedback] = useState( + // currentMessage.feedback?.is_thumbs_up === true ? true : currentMessage.feedback?.is_thumbs_up === false ? false : null + // ); + + // const submitFeedback = (thumbsUp: boolean) => { + // setFeedback(thumbsUp); + // notify('Feedback Submitted', NotificationSeverity.SUCCESS); + // // MessageService.submitFeedback({ id: currentMessage.message_id, feedback: {is_thumbs_up: thumbsUp}, useCase: selectedUseCase.use_case }); + // }; + + const copyText = (text: string) => { + navigator.clipboard.writeText(text); + notify("Copied to clipboard", NotificationSeverity.SUCCESS); + }; + + const modifyResponse = () => { + if (heightCheck.current) { + let updateHeight = heightCheck.current.offsetHeight; + setInputHeight(updateHeight); + setEditResponse(true); + } + }; + + const updateResponse = (response: string) => { + responseRef.current = response; + setDisabledSave(response === ""); + }; + + const saveResponse = () => { + const convoClone: Message[] = selectedConversationHistory.map( + (messageItem) => { + if (messageItem.time === currentMessage.time) { + return { + ...messageItem, + content: responseRef.current, + }; + } + return messageItem; + }, + ); + + dispatch(setSelectedConversationHistory(convoClone)); + dispatch( + saveConversationtoDatabase({ + conversation: { id: selectedConversationId }, + }), + ); + + setInputHeight(0); + setEditResponse(false); + setDisabledSave(false); + }; + + const cancelResponse = () => { + setEditResponse(false); + }; + + const displayCurrentMessage = () => { + if (currentMessage.content) { + if (editResponse) { + return ( +
+ + + + Save + + Cancel +
+ ); + } else { + return ( + + + + ); + } + } else { + return ( + + Generating response + + . + . + . + + + ); + } + }; + + const displayMessageActions = () => { + if (onGoingResult) return; + + return ( + + {/*TODO: feedback support */} + {/* submitFeedback(true)}> + {feedback === null || feedback === false ? ( + + ) : ( + + )} + + + submitFeedback(false)}> + {feedback === null || feedback === true ? ( + + ) : ( + + )} + */} + + + + {isClipboardAvailable && ( + + copyText(assistantMessage)}> + + + + )} + + {type === "chat" && ( + + + + + + )} + + ); + }; + + return ( +
+ <div className={styles.chatReply}>
+ <div className={styles.icon}>
+ <AtomIcon />
+ </div>
+ <Box className={styles.chatPrompt}>
+ {displayCurrentMessage()}
+
+ {!pending && displayMessageActions()}
+ </Box>
+ </div>
+ ); +}; + +export default ChatAssistant; diff --git a/ProductivitySuite/ui/react/src/components/Chat_Markdown/ChatMarkdown.tsx b/ProductivitySuite/ui/react/src/components/Chat_Markdown/ChatMarkdown.tsx new file mode 100644 index 0000000000..2d8c0658d9 --- /dev/null +++ b/ProductivitySuite/ui/react/src/components/Chat_Markdown/ChatMarkdown.tsx @@ -0,0 +1,90 @@ +import React, { lazy, Suspense, useEffect } from "react"; +import markdownStyles from "./markdown.module.scss"; +import ReactMarkdown from "react-markdown"; +import remarkGfm from "remark-gfm"; +import remarkFrontmatter from "remark-frontmatter"; +import remarkBreaks from "remark-breaks"; + +const CodeRender = lazy(() => import("./CodeRender/CodeRender")); + +type MarkdownProps = { + content: string; +}; + +const ChatMarkdown = ({ content }: MarkdownProps) => { + useEffect(() => { + // preload in background + import("./CodeRender/CodeRender"); + }, []); + + return ( + { + // check for nested block elements attempting to inject into a p tag + const hasBlockElement = React.Children.toArray(children).some( + (child) => + React.isValidElement(child) && + typeof child.type === "string" && + ["div", "h1", "h2", "h3", "ul", "ol", "table"].includes( + child.type, + ), + ); + + // If block-level elements are found, avoid wrapping in
a <p> tag
+ return hasBlockElement ? ( + <>{children}</> + ) : (
+ <p>
+ {children}
+ </p>
+ );
+ },
+ a: ({ children, ...props }) => {
+ return (
+ //@ts-ignore
+ <a {...props}>
+ {children}
+ </a>
+ );
+ },
+ table: ({ children, ...props }) => {
+ return (
+ <div className={markdownStyles.tableDiv}>
+ <table {...props}>{children}</table>
+ </div>
+ ); + }, + code({ inline, className, children }) { + const lang = /language-(\w+)/.exec(className || ""); + return ( + Loading Code Block...}> + {/*@ts-ignore*/} + + + ); + }, + }} + /> + ); +}; + +export default ChatMarkdown; diff --git a/ProductivitySuite/ui/react/src/components/Chat_Markdown/CodeRender/CodeRender.tsx b/ProductivitySuite/ui/react/src/components/Chat_Markdown/CodeRender/CodeRender.tsx new file mode 100644 index 0000000000..3fb833c90f --- /dev/null +++ b/ProductivitySuite/ui/react/src/components/Chat_Markdown/CodeRender/CodeRender.tsx @@ -0,0 +1,78 @@ +import styles from "./codeRender.module.scss"; +import { Light as SyntaxHighlighter } from "react-syntax-highlighter"; +import { + atomOneDark, + atomOneLight, +} from "react-syntax-highlighter/dist/esm/styles/hljs"; +import ContentCopyIcon from "@mui/icons-material/ContentCopy"; +import { IconButton, styled, Tooltip, useTheme } from "@mui/material"; +import { + NotificationSeverity, + notify, +} from "@components/Notification/Notification"; + +const TitleBox = styled("div")(({ theme }) => ({ + background: theme.customStyles.code?.primary, + color: theme.customStyles.code?.title, +})); + +const StyledCode = styled(SyntaxHighlighter)(({ theme }) => ({ + background: theme.customStyles.code?.secondary + " !important", +})); + +type CodeRenderProps = { + cleanCode: React.ReactNode; + language: string; + inline: boolean; +}; +const CodeRender = ({ cleanCode, language, inline }: CodeRenderProps) => { + const theme = useTheme(); + + const isClipboardAvailable = navigator.clipboard && window.isSecureContext; + + cleanCode = String(cleanCode) + .replace(/\n$/, "") + .replace(/^\s*[\r\n]/gm, ""); //right trim and remove empty lines from the input + + const copyText = (text: string) => { + navigator.clipboard.writeText(text); + notify("Copied to clipboard", NotificationSeverity.SUCCESS); + }; + + try { + return inline ? ( + + {cleanCode} + + ) : ( +
+ <div className={styles.code}>
+ <TitleBox className={styles.codeHead}>
+ <div className={styles.codeTitle}>
+ {language || "language not detected"}
+ </div>
+ <div className={styles.codeActionGroup}>
+ {isClipboardAvailable && (
+ <Tooltip title="Copy">
+ <IconButton onClick={() => copyText(cleanCode.toString())}>
+ <ContentCopyIcon />
+ </IconButton>
+ </Tooltip>
+ )}
+ </div>
+ </TitleBox>
+ <StyledCode
+ language={language}
+ style={theme.palette.mode === "dark" ? atomOneDark : atomOneLight}
+ className={styles.codeHighlighterDiv}
+ >
+ {cleanCode}
+ </StyledCode>
+ </div>
+ );
+ } catch (err) {
+ return <code>{cleanCode}</code>
; + } +}; + +export default CodeRender; diff --git a/ProductivitySuite/ui/react/src/components/Chat_Markdown/CodeRender/codeRender.module.scss b/ProductivitySuite/ui/react/src/components/Chat_Markdown/CodeRender/codeRender.module.scss new file mode 100644 index 0000000000..596004846e --- /dev/null +++ b/ProductivitySuite/ui/react/src/components/Chat_Markdown/CodeRender/codeRender.module.scss @@ -0,0 +1,36 @@ +.code { + margin: 7px 0px; + + .codeHead { + padding: 0px 10px !important; + display: flex; + flex-direction: row; + flex-wrap: nowrap; + align-items: center; + justify-content: space-between; + + .codeTitle { + } + + .codeActionGroup { + display: flex; + flex-direction: row; + flex-wrap: nowrap; + align-items: center; + justify-content: flex-start; + } + } + + .codeHighlighterDiv { + margin: 0px !important; + white-space: pre-wrap !important; + + code { + white-space: pre-wrap !important; + } + } +} + +.inlineCode { + background: #fff; +} diff --git a/ProductivitySuite/ui/react/src/components/Chat_Markdown/markdown.module.scss b/ProductivitySuite/ui/react/src/components/Chat_Markdown/markdown.module.scss new file mode 100644 index 0000000000..e86902eed3 --- /dev/null +++ b/ProductivitySuite/ui/react/src/components/Chat_Markdown/markdown.module.scss @@ -0,0 +1,29 @@ +.tableDiv { + &:first-of-type { + padding-top: 0px !important; + } + + table, + th, + td { + border: 1px solid black; + border-collapse: collapse; + padding: 5px; + } +} + +.md { + li { + margin-left: 35px; /* Adjust the value based on your preference */ + } +} + +.markdownWrapper { + > p:first-of-type { + margin-top: 0.25rem; + } + + > p:last-of-type { + margin-bottom: 0.25rem; + } +} diff --git a/ProductivitySuite/ui/react/src/components/Chat_SettingsModal/ChatSettingsModal.module.scss b/ProductivitySuite/ui/react/src/components/Chat_SettingsModal/ChatSettingsModal.module.scss new file mode 100644 index 0000000000..e69de29bb2 diff --git a/ProductivitySuite/ui/react/src/components/Chat_SettingsModal/ChatSettingsModal.tsx b/ProductivitySuite/ui/react/src/components/Chat_SettingsModal/ChatSettingsModal.tsx new file mode 100644 index 0000000000..732e5a2123 --- /dev/null +++ b/ProductivitySuite/ui/react/src/components/Chat_SettingsModal/ChatSettingsModal.tsx @@ -0,0 +1,43 @@ +import * as React from "react"; +import { + Box, + Typography, + Modal, + IconButton, + styled, + Tooltip, +} from "@mui/material"; +import SettingsApplicationsOutlinedIcon from "@mui/icons-material/SettingsApplicationsOutlined"; +import PromptSettings from "@components/PromptSettings/PromptSettings"; +import { Close } from "@mui/icons-material"; +import ModalBox from "@root/shared/ModalBox/ModalBox"; + +const ChatSettingsModal = () => { + const [open, setOpen] = React.useState(false); + const handleOpen = () => setOpen(true); + const handleClose = () => setOpen(false); + + return ( +
+ <>
+ <Tooltip title="Settings">
+ <IconButton onClick={handleOpen}>
+ <SettingsApplicationsOutlinedIcon />
+ </IconButton>
+ </Tooltip>
+ <Modal open={open} onClose={handleClose}>
+ <ModalBox>
+ <Box display="flex" justifyContent="space-between" alignItems="center">
+ <Typography variant="h6">Response Settings</Typography>
+ <IconButton onClick={() => setOpen(false)}>
+ <Close />
+ </IconButton>
+ </Box>
+ <PromptSettings />
+ </ModalBox>
+ </Modal>
+ </>
+ ); +}; + +export default ChatSettingsModal; diff --git a/ProductivitySuite/ui/react/src/components/Chat_Sources/ChatSources.module.scss b/ProductivitySuite/ui/react/src/components/Chat_Sources/ChatSources.module.scss new file mode 100644 index 0000000000..1a6a0d76e6 --- /dev/null +++ b/ProductivitySuite/ui/react/src/components/Chat_Sources/ChatSources.module.scss @@ -0,0 +1,47 @@ +.sourceWrapper { + display: flex; + flex-direction: row; + justify-content: flex-end; + flex-wrap: wrap; + width: var(--content-width); + margin: 0 auto var(--vertical-spacer); + max-width: 100%; +} + +.iconWrap { + border: none; + border-radius: 6px; + margin-right: 0.5rem; + width: 30px; + height: 30px; + display: flex; + align-items: center; + justify-content: center; +} + +.sourceBox { + display: flex; + flex-direction: row; + align-items: center; + justify-content: center; + margin-left: 1rem; + padding: 5px; + border-radius: 6px; + margin-bottom: 1rem; +} + +.title { + margin: 0 0.5rem 0 0; + white-space: nowrap; + display: inline-block; + max-width: 150px; + overflow: hidden; + text-overflow: ellipsis; + font-weight: 400; +} + +.chip { + border-radius: 8px; + padding: 3px; + font-size: 12px; +} diff --git a/ProductivitySuite/ui/react/src/components/Chat_Sources/ChatSources.tsx b/ProductivitySuite/ui/react/src/components/Chat_Sources/ChatSources.tsx new file mode 100644 index 0000000000..2bf0858254 --- /dev/null +++ b/ProductivitySuite/ui/react/src/components/Chat_Sources/ChatSources.tsx @@ -0,0 +1,28 @@ +import { Box } from "@mui/material"; +import { conversationSelector } from "@redux/Conversation/ConversationSlice"; +import { useAppSelector } from "@redux/store"; +import styles from "./ChatSources.module.scss"; +import FileDispaly from "@components/File_Display/FileDisplay"; + +const ChatSources: React.FC = () => { + const { sourceLinks, sourceFiles, sourceType } = + useAppSelector(conversationSelector); + const isWeb = sourceType === "web"; + const sourceElements = isWeb ? 
sourceLinks : sourceFiles; + + if (sourceLinks.length === 0 && sourceFiles.length === 0) return; + + const renderElements = () => { + return sourceElements.map((element: any, elementIndex) => { + return ( + + + + ); + }); + }; + + return {renderElements()}; +}; + +export default ChatSources; diff --git a/ProductivitySuite/ui/react/src/components/Chat_User/ChatUser.module.scss b/ProductivitySuite/ui/react/src/components/Chat_User/ChatUser.module.scss new file mode 100644 index 0000000000..3a5b5079ee --- /dev/null +++ b/ProductivitySuite/ui/react/src/components/Chat_User/ChatUser.module.scss @@ -0,0 +1,27 @@ +.userWrapper { + display: flex; + justify-content: flex-end; + margin-bottom: 2rem; + position: relative; + + .userPrompt { + max-width: 80%; + border-radius: var(--input-radius); + padding: 0.75rem 2rem 0.75rem 1rem; + overflow-wrap: break-word; + word-wrap: break-word; + word-break: break-word; + } + + .addIcon { + position: absolute; + right: -16px; + top: 3px; + opacity: 0; + transition: opacity 0.3s; + } + + &:hover .addIcon { + opacity: 1; + } +} diff --git a/ProductivitySuite/ui/react/src/components/Chat_User/ChatUser.tsx b/ProductivitySuite/ui/react/src/components/Chat_User/ChatUser.tsx new file mode 100644 index 0000000000..37971f87cb --- /dev/null +++ b/ProductivitySuite/ui/react/src/components/Chat_User/ChatUser.tsx @@ -0,0 +1,44 @@ +import { IconButton, styled, Tooltip } from "@mui/material"; +import React from "react"; +import styles from "./ChatUser.module.scss"; +import AddCircle from "@mui/icons-material/AddCircle"; +import { useAppDispatch } from "@redux/store"; +import { addPrompt } from "@redux/Prompt/PromptSlice"; +import ChatMarkdown from "@components/Chat_Markdown/ChatMarkdown"; + +interface ChatUserProps { + content: string; +} + +const UserInput = styled("div")(({ theme }) => ({ + background: theme.customStyles.user?.main, +})); + +const AddIcon = styled(AddCircle)(({ theme }) => ({ + path: { + fill: theme.customStyles.icon?.main, + }, +})); + +const ChatUser: React.FC = ({ content }) => { + const dispatch = useAppDispatch(); + + const sharePrompt = () => { + dispatch(addPrompt({ promptText: content })); + }; + + return ( +
+ <div className={styles.userWrapper}>
+ <UserInput className={styles.userPrompt}>
+ <ChatMarkdown content={content} />
+ </UserInput>
+ <Tooltip title="Add to prompts">
+ <IconButton className={styles.addIcon} onClick={sharePrompt}>
+ <AddIcon />
+ </IconButton>
+ </Tooltip>
+ </div>
+ ); +}; + +export default ChatUser; diff --git a/ProductivitySuite/ui/react/src/components/CodeGen/CodeGen.tsx b/ProductivitySuite/ui/react/src/components/CodeGen/CodeGen.tsx deleted file mode 100644 index 29c96f61cb..0000000000 --- a/ProductivitySuite/ui/react/src/components/CodeGen/CodeGen.tsx +++ /dev/null @@ -1,140 +0,0 @@ -// Copyright (C) 2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 - -import { KeyboardEventHandler, SyntheticEvent, useEffect, useRef, useState } from 'react' -import styleClasses from "./codeGen.module.scss" -import { ActionIcon, Textarea, Title, rem } from '@mantine/core' -import { IconArrowRight } from '@tabler/icons-react' -import { ConversationMessage } from '../Message/conversationMessage' -import { fetchEventSource } from '@microsoft/fetch-event-source' -import { CODE_GEN_URL } from '../../config' - - - -const CodeGen = () => { - const [prompt, setPrompt] = useState("") - const [submittedPrompt, setSubmittedPrompt] = useState("") - const [response,setResponse] = useState(""); - const promptInputRef = useRef(null) - const scrollViewport = useRef(null) - - const toSend = "Enter" - - const handleSubmit = async () => { - setResponse("") - setSubmittedPrompt(prompt) - const body = { - messages:prompt - } - fetchEventSource(CODE_GEN_URL, { - method: "POST", - headers: { - "Content-Type": "application/json", - "Accept":"*/*" - }, - body: JSON.stringify(body), - openWhenHidden: true, - async onopen(response) { - if (response.ok) { - return; - } else if (response.status >= 400 && response.status < 500 && response.status !== 429) { - const e = await response.json(); - console.log(e); - throw Error(e.error.message); - } else { - console.log("error", response); - } - }, - onmessage(msg) { - if (msg?.data != "[DONE]") { - try { - const match = msg.data.match(/b'([^']*)'/); - if (match && match[1] != "") { - const extractedText = match[1].replace(/\\n/g, "\n"); - setResponse(prev=>prev+extractedText); - } - } catch (e) { - console.log("something wrong in msg", e); - throw e; - } - } - }, - onerror(err) { - console.log("error", err); - setResponse("") - throw err; - }, - onclose() { - setPrompt("") - }, - }); - - } - - const scrollToBottom = () => { - scrollViewport.current!.scrollTo({ top: scrollViewport.current!.scrollHeight }) - } - - useEffect(() => { - scrollToBottom() - }, [response]) - - const handleKeyDown: KeyboardEventHandler = (event) => { - if (!event.shiftKey && event.key === toSend) { - handleSubmit() - setTimeout(() => { - setPrompt("") - }, 1) - } - } - - const handleChange = (event: SyntheticEvent) => { - event.preventDefault() - setPrompt((event.target as HTMLTextAreaElement).value) - } - return ( -
-
-
-
- CodeGen -
- -
- {!submittedPrompt && !response && - (<> -
Start by asking a question
- ) - } - {submittedPrompt && ( - - )} - {response && ( - - )} -
- -
-