Commit 38e100d

Merge pull request #105 from ant-xuexiao/feat_bot_generator
feat: init bot builder
2 parents 88db6fb + 4bc3fda

18 files changed: 281 additions, 260 deletions


client/share/supabas-client.ts

Lines changed: 1 addition & 1 deletion
@@ -2,7 +2,7 @@ import { Database } from '@/types/database.types';
 import { SupabaseClient, createClient } from '@supabase/supabase-js';
 
 const supabaseUrl = process.env.SUPABASE_URL!;
-const supabaseAnonKey = process.env.SUPABASE_API_KEY!;
+const supabaseAnonKey = process.env.SUPABASE_SERVICE_KEY!;
 export const supabase: SupabaseClient = createClient<Database>(
   supabaseUrl,
   supabaseAnonKey,

docker/docker-compose.yml

Lines changed: 1 addition & 1 deletion
@@ -36,7 +36,7 @@ services:
       - 3000:3000
     environment:
       SUPABASE_URL: ${SUPABASE_URL}
-      SUPABASE_API_KEY: ${SUPABASE_SERVICE_KEY}
+      SUPABASE_SERVICE_KEY: ${SUPABASE_SERVICE_KEY}
       NEXT_PUBLIC_ASSISTANT_API_HOST: http://0.0.0.0:8080
       NEXT_STANDALONE: true

lui/src/services/ChatController.ts

Lines changed: 1 addition & 1 deletion
@@ -8,7 +8,7 @@ export async function streamChat(
   messages: IPrompt[],
   host = 'http://127.0.0.1:8000',
 ): Promise<Response> {
-  return fetch(`${host}/api/chat/stream`, {
+  return fetch(`${host}/api/chat/qa`, {
     method: 'POST',
     headers: {
       'Content-Type': 'application/json',
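
The client-side change above points streamChat at the renamed /api/chat/qa route. As a rough illustration (not part of this commit) of how that streaming route can be exercised directly, here is a minimal Python sketch; the request body shape is an assumption inferred from the server-side ChatData/Message models added below, so adjust field names to whatever the client actually posts.

# Hypothetical smoke test for the renamed endpoint; the payload shape is assumed, not taken from this diff.
import requests

payload = {"messages": [{"role": "user", "content": "How do I customize the Ant Design theme?"}]}
with requests.post("http://127.0.0.1:8000/api/chat/qa", json=payload, stream=True) as resp:
    resp.raise_for_status()
    for chunk in resp.iter_content(chunk_size=None, decode_unicode=True):
        # plain answer tokens arrive as-is; tool activity is wrapped in <TOOL>...<ANSWER> markers
        print(chunk, end="", flush=True)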

package.json

Lines changed: 5 additions & 3 deletions
@@ -4,9 +4,11 @@
   "private": true,
   "scripts": {
     "bootstrap": "cd client && yarn && cd ../server && bash setup_python.sh",
-    "dev:client": "cd client && yarn run dev",
-    "dev:server": "cd server && ./venv/bin/python3 -m uvicorn main:app --reload",
-    "dev:app": "concurrently \"yarn run dev:client\" \"yarn run dev:server\"",
+    "client": "cd client && yarn run dev",
+    "lui": "cd lui && yarn run dev",
+    "server": "cd server && ./venv/bin/python3 -m uvicorn main:app --reload",
+    "client:server": "concurrently \"yarn run server\" \"yarn run client\"",
+    "lui:server": "concurrently \"yarn run server\" \"yarn run lui\"",
     "build:docker": "docker build -t bot-meta ."
   },
   "engines": {
File renamed without changes.

server/agent/base.py

Lines changed: 170 additions & 0 deletions
@@ -0,0 +1,170 @@
+import json
+from typing import AsyncIterator, Dict, Callable, Optional
+import uuid
+from langchain.agents import AgentExecutor
+from data_class import ChatData, Message
+from langchain.agents.format_scratchpad.openai_tools import (
+    format_to_openai_tool_messages,
+)
+from langchain_core.messages import AIMessage, FunctionMessage, HumanMessage
+from langchain.agents.output_parsers.openai_tools import OpenAIToolsAgentOutputParser
+from langchain.prompts import PromptTemplate, MessagesPlaceholder
+from langchain_core.utils.function_calling import convert_to_openai_tool
+from langchain_core.prompts import ChatPromptTemplate
+from langchain.utilities.tavily_search import TavilySearchAPIWrapper
+from langchain.tools.tavily_search import TavilySearchResults
+from langchain_openai import ChatOpenAI
+from uilts.env import get_env_variable
+
+OPEN_API_KEY = get_env_variable("OPENAI_API_KEY")
+TAVILY_API_KEY = get_env_variable("TAVILY_API_KEY")
+
+class AgentBuilder:
+
+    def __init__(
+        self,
+        prompt: str,
+        tools: Dict[str, Callable],
+        enable_tavily: Optional[bool] = True,
+        temperature: Optional[int] = 0.2,
+        max_tokens: Optional[int] = 1500
+    ):
+        """
+        @class `Build an AgentExecutor based on tools and prompt`
+        @param prompt: str
+        @param tools: Dict[str, Callable]
+        @param enable_tavily: Optional[bool] If set True, enables the Tavily tool
+        @param temperature: Optional[int]
+        @param max_tokens: Optional[int]
+        """
+        self.prompt = prompt
+        self.tools = tools
+        self.enable_tavily = enable_tavily
+        self.temperature = temperature
+        self.max_tokens = max_tokens
+        self.agent_executor = self._create_agent_with_tools()
+
+    def init_tavily_tools(self):
+        # init Tavily
+        search = TavilySearchAPIWrapper()
+        tavily_tool = TavilySearchResults(api_wrapper=search)
+        return [tavily_tool]
+
+    def _create_agent_with_tools(self) -> AgentExecutor:
+        llm = ChatOpenAI(model="gpt-4-1106-preview", temperature=self.temperature, streaming=True, max_tokens=self.max_tokens, openai_api_key=OPEN_API_KEY)
+
+        tools = self.init_tavily_tools() if self.enable_tavily else []
+        for tool in self.tools.values():
+            tools.append(tool)
+
+        if tools:
+            llm_with_tools = llm.bind(
+                tools=[convert_to_openai_tool(tool) for tool in tools]
+            )
+        else:
+            llm_with_tools = llm
+
+        self.prompt = self.get_prompt()
+        agent = (
+            {
+                "input": lambda x: x["input"],
+                "agent_scratchpad": lambda x: format_to_openai_tool_messages(
+                    x["intermediate_steps"]
+                ),
+                "chat_history": lambda x: x["chat_history"],
+            }
+            | self.prompt
+            | llm_with_tools
+            | OpenAIToolsAgentOutputParser()
+        )
+
+        return AgentExecutor(agent=agent, tools=tools, verbose=True, handle_parsing_errors=True, max_iterations=5)
+
+    def get_prompt(self):
+        return ChatPromptTemplate.from_messages(
+            [
+                ("system", self.prompt),
+                MessagesPlaceholder(variable_name="chat_history"),
+                ("user", "{input}"),
+                MessagesPlaceholder(variable_name="agent_scratchpad"),
+            ]
+        )
+
+    @staticmethod
+    def chat_history_transform(messages: list[Message]):
+        transformed_messages = []
+        for message in messages:
+            print('message', message)
+            if message.role == "user":
+                transformed_messages.append(HumanMessage(content=message.content))
+            elif message.role == "assistant":
+                transformed_messages.append(AIMessage(content=message.content))
+            else:
+                transformed_messages.append(FunctionMessage(content=message.content))
+        return transformed_messages
+
+    async def run_chat(self, input_data: ChatData) -> AsyncIterator[str]:
+        try:
+            messages = input_data.messages
+            print(self.chat_history_transform(messages))
+
+            async for event in self.agent_executor.astream_events(
+                {
+                    "input": messages[len(messages) - 1].content,
+                    "chat_history": self.chat_history_transform(messages),
+                },
+                version="v1",
+            ):
+                kind = event["event"]
+                if kind == "on_chain_start":
+                    if (
+                        event["name"] == "agent"
+                    ):
+                        print(
+                            f"Starting agent: {event['name']} "
+                            f"with input: {event['data'].get('input')}"
+                        )
+                elif kind == "on_chain_end":
+                    if (
+                        event["name"] == "agent"
+                    ):
+                        print(
+                            f"Done agent: {event['name']} "
+                            f"with output: {event['data'].get('output')['output']}"
+                        )
+                if kind == "on_chat_model_stream":
+                    uid = str(uuid.uuid4())
+                    content = event["data"]["chunk"].content
+                    if content:
+                        yield f"{content}"
+                elif kind == "on_tool_start":
+                    children_value = event["data"].get("input", {})
+                    json_output = json.dumps({
+                        "type": "tool",
+                        "id": uid,
+                        "extra": {
+                            "source": f"已调用工具: {event['name']}",
+                            "pluginName": "GitHub",
+                            "data": json.dumps(children_value, ensure_ascii=False),
+                            "status": "loading"
+                        }
+                    }, ensure_ascii=False)
+
+                    yield f"<TOOL>{json_output}\n"
+                elif kind == "on_tool_end":
+                    children_value = event["data"].get("output", {})
+                    json_output = json.dumps({
+                        "type": "tool",
+                        "id": uid,
+                        "extra": {
+                            "source": f"已调用工具: {event['name']}",
+                            "pluginName": "GitHub",
+                            "data": children_value,
+                            "status": "success"
+                        },
+                    }, ensure_ascii=False)
+                    yield f"<TOOL>{json_output}\n<ANSWER>"
+        except Exception as e:
+            yield f"data: {str(e)}\n"
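
For a quick sense of how AgentBuilder's streaming output is consumed, here is a minimal local driver (a sketch, not part of the commit). It assumes ChatData and Message expose the messages, role, and content fields used above, and that OPENAI_API_KEY (and TAVILY_API_KEY, if Tavily is enabled) are set in the environment.

# Hypothetical local driver for AgentBuilder.run_chat; model field names are inferred from the code above.
import asyncio

from agent.base import AgentBuilder
from data_class import ChatData, Message

async def main():
    agent = AgentBuilder(prompt="You are a helpful assistant.", tools={}, enable_tavily=False)
    data = ChatData(messages=[Message(role="user", content="Hello!")])
    async for chunk in agent.run_chat(data):
        # plain tokens stream through as-is; tool calls arrive wrapped in <TOOL>...<ANSWER> markers
        print(chunk, end="", flush=True)

asyncio.run(main())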

server/agent/bot_builder.py

Lines changed: 31 additions & 0 deletions
@@ -0,0 +1,31 @@
+from typing import AsyncIterator
+from data_class import ChatData
+from agent.base import AgentBuilder
+
+PROMPT = """
+# 角色
+你是一个 GitHub 答疑机器人创建助手。你擅长根据用户提供的 Github 仓库信息创建一个答疑机器人。
+
+## 技能
+### 技能1:获取并确认仓库信息
+- 引导用户提供他们的GitHub仓库信息。
+- 根据提供的信息确认这个仓库存在并且可以访问。
+
+### 技能2:创建答疑机器人
+- 使用bot_builder工具根据用户提供的Github仓库信息创建机器人。
+
+### 技能3:修改机器人的配置
+- 根据用户的描述进行机器人的配置信息修改。
+
+## 限制
+- 只能基于用户提供的Github仓库信息创建答疑机器人。
+- 在创建答疑机器人的过程中,如果遇到问题或者错误,可以提供相关建议或解决方案,但不能直接修改用户的Github仓库。
+- 在修改机器人的配置信息时,必须遵守用户的建议和要求,不能擅自改变。
+"""
+
+
+TOOL_MAPPING = {}
+
+def agent_chat(input_data: ChatData) -> AsyncIterator[str]:
+    agent = AgentBuilder(prompt=PROMPT, tools={}, enable_tavily=False)
+    return agent.run_chat(input_data)
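
TOOL_MAPPING is still empty here even though the prompt's second skill refers to a bot_builder tool, so this builder currently chats without any callable tools. Once a real tool lands, it could be registered roughly like the hypothetical sketch below (names and behavior are illustrative, not from this commit) and passed to AgentBuilder via its tools argument.

# Hypothetical registration of a bot-creation tool; create_bot and its behavior are not part of this diff.
from langchain.tools import tool

@tool
def create_bot(repo_name: str) -> str:
    """Create a Q&A bot for the given GitHub repository."""
    # ... persist the bot configuration here, e.g. through the Supabase client ...
    return f"Bot created for {repo_name}"

TOOL_MAPPING = {"create_bot": create_bot}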

server/agent/qa_chat.py

Lines changed: 43 additions & 0 deletions
@@ -0,0 +1,43 @@
+from typing import AsyncIterator
+from data_class import ChatData
+from agent.base import AgentBuilder
+from tools import issue, sourcecode, knowledge
+
+
+PROMPT = """
+# Character
+You are a skilled assistant dedicated to Ant Design, capable of delivering comprehensive insights and solutions pertaining to Ant Design. You excel in fixing code issues correlated with Ant Design.
+
+## Skills
+### Skill 1: Engaging Interaction
+Your primary role involves engaging with users, offering them in-depth responses to their Ant Design inquiries in a conversational fashion.
+
+### Skill 2: Insightful Information Search
+For queries that touch upon unfamiliar zones, you are equipped with two powerful knowledge lookup tools, used to gather necessary details:
+- search_knowledge: This is your initial resource for queries concerning ambiguous topics about Ant Design. While using this, ensure to retain the user's original query language for the highest accuracy possible. Therefore, a specific question like 'Ant Design的新特性是什么?' should be searched as 'Ant Design的新特性是什么?'.
+- tavily_search_results_json: Should search_knowledge fail to accommodate the required facts, this tool would be the next step.
+
+### Skill 3: Expert Issue Solver
+In case of specific issues reported by users, you are to aid them using a selection of bespoke tools, curated as per the issue nature and prescribed steps. The common instances cater to:
+- Routine engagement with the user.
+- Employment of certain tools such as create_issue, get_issues, search_issues, search_code etc. when the user is facing a specific hurdle.
+
+## Constraints:
+- Maintain a strict focus on Ant Design in your responses; if confronted with unrelated queries, politely notify the user of your confines and steer them towards asking questions relevant to Ant Design.
+- Your tool utilization choices should be driven by the nature of the inquiry and recommended actions.
+- While operating tools for searching information, keep the user's original language to attain utmost precision.
+- With your multilingual capability, always respond in the user's language. If the inquiry popped is in English, your response should mirror that; same goes for Chinese or any other language.
+"""
+
+
+TOOL_MAPPING = {
+    "search_knowledge": knowledge.search_knowledge,
+    "create_issue": issue.create_issue,
+    "get_issues": issue.get_issues,
+    "search_issues": issue.search_issues,
+    "search_code": sourcecode.search_code,
+}
+
+def agent_chat(input_data: ChatData) -> AsyncIterator[str]:
+    agent = AgentBuilder(prompt=PROMPT, tools=TOOL_MAPPING)
+    return agent.run_chat(input_data)
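
The diff does not include the FastAPI routing that connects agent_chat to the /api/chat/qa path the client now calls, so as a sketch only (route path, module layout, and media type are assumptions), the wiring could look roughly like this:

# Hypothetical FastAPI wiring for the qa chat stream; not part of this commit.
from fastapi import FastAPI
from fastapi.responses import StreamingResponse

from agent import qa_chat
from data_class import ChatData

app = FastAPI()

@app.post("/api/chat/qa")
def run_qa_chat(input_data: ChatData):
    # agent_chat returns an async iterator of text chunks, which StreamingResponse streams to the client
    return StreamingResponse(qa_chat.agent_chat(input_data), media_type="text/event-stream")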
