Skip to content

Commit 32389d4

Browse files
authored
Merge pull request #405 from LlmKira/dev
Add duckduckgo-search
2 parents 4beda09 + 06bc971 commit 32389d4

File tree

17 files changed

+145
-453
lines changed

17 files changed

+145
-453
lines changed

README.md

+2-1
Original file line numberDiff line numberDiff line change
@@ -81,7 +81,7 @@ or [one-api](https://github.com/songquanpeng/one-api) independently.
8181
- `Login via url`: Use `/login <a token>$<something like https://provider.com/login>` to Login. The program posts the token to the interface to
8282
retrieve configuration
8383
information, [how to develop this](https://github.com/LlmKira/Openaibot/blob/81eddbff0f136697d5ad6e13ee1a7477b26624ed/app/components/credential.py#L20).
84-
- `Login`: Use `/login https://<api endpoint>/v1$<api key>$<the model>` to login
84+
- `Login`: Use `/login https://<api endpoint>/v1$<api key>$<the model>$<tool model such as gpt-3.5-turbo>` to login
8585

8686
### 🧀 Plugin Can Do More
8787

@@ -97,6 +97,7 @@ or [one-api](https://github.com/songquanpeng/one-api) independently.
9797
| Discord ||| |
9898
| Kook ||| Does not support `triggering by reply` |
9999
| Slack ||| Does not support `triggering by reply` |
100+
| Line || | |
100101
| QQ || | |
101102
| Wechat || | |
102103
| Twitter || | |

app/middleware/llm_task.py

+1
Original file line numberDiff line numberDiff line change
@@ -102,6 +102,7 @@ def pair_check(_messages):
102102
new_list.append(_messages[i])
103103
new_list.append(_messages[-1])
104104
if isinstance(_messages[-1], AssistantMessage) and _messages[-1].tool_calls:
105+
logger.warning("llm_task:the last AssistantMessage not paired, be careful")
105106
new_list.extend(mock_tool_message(_messages[-1], "[On Queue]"))
106107
return new_list
107108

app/receiver/function.py

+2-2
Original file line numberDiff line numberDiff line change
@@ -273,10 +273,10 @@ async def run_pending_task(task: TaskHeader, pending_task: ToolCall):
273273
logger.debug(f"Read History:{history}")
274274
continue_ = await logic.llm_continue(
275275
context=f"History:{history},ToolCallResult:{run_status}",
276-
condition="Would you like to continue a chat?",
276+
condition="If there is still any action that needs to be performed",
277277
default=False,
278278
)
279-
if continue_.continue_it:
279+
if continue_.boolean:
280280
logger.debug(
281281
"ToolCall run out, resign a new request to request stop sign."
282282
)

app/sender/discord/__init__.py

+2-5
Original file line numberDiff line numberDiff line change
@@ -5,7 +5,6 @@
55
# @Software: PyCharm
66
import base64
77
import binascii
8-
import json
98
import random
109
from typing import List
1110

@@ -36,6 +35,7 @@
3635
is_empty_command,
3736
uid_make,
3837
save_credential,
38+
dict2markdown,
3939
)
4040
from llmkira.openapi.trigger import get_trigger_loop
4141
from ...components.credential import Credential, ProviderError
@@ -391,10 +391,7 @@ async def listen_env_command(ctx: crescent.Context, env_string: str):
391391
"**🧊 Env parse failed...O_o**\n", separator="\n"
392392
)
393393
else:
394-
text = formatting.format_text(
395-
f"**🧊 Updated**\n" f"```json\n{json.dumps(env_map, indent=2)}```",
396-
separator="\n",
397-
)
394+
text = convert(dict2markdown(env_map))
398395
await ctx.respond(
399396
ephemeral=True,
400397
content=text,

app/sender/kook/__init__.py

+2-5
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,6 @@
33
# @Author : sudoskys
44
# @File : __init__.py.py
55
# @Software: PyCharm
6-
import json
76
import random
87
from typing import List
98

@@ -33,6 +32,7 @@
3332
is_empty_command,
3433
uid_make,
3534
save_credential,
35+
dict2markdown,
3636
)
3737
from llmkira.openapi.trigger import get_trigger_loop
3838
from ...components.credential import ProviderError, Credential
@@ -396,10 +396,7 @@ async def listen_env_command(msg: Message, env_string: str):
396396
"**🧊 Env parse failed...O_o**\n", separator="\n"
397397
)
398398
else:
399-
text = formatting.format_text(
400-
f"**🧊 Updated**\n" f"```json\n{json.dumps(env_map, indent=2)}```",
401-
separator="\n",
402-
)
399+
text = convert(dict2markdown(env_map))
403400
await msg.reply(
404401
is_temp=True,
405402
type=MessageTypes.KMD,

app/sender/slack/__init__.py

+6-8
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,6 @@
33
# @Author : sudoskys
44
# @File : __init__.py.py
55
# @Software: PyCharm
6-
import json
76
import time
87
from ssl import SSLContext
98
from typing import List
@@ -24,6 +23,7 @@
2423
parse_command,
2524
uid_make,
2625
login,
26+
dict2markdown,
2727
)
2828
from app.setting.slack import BotSetting
2929
from llmkira.kv_manager.env import EnvManager
@@ -239,10 +239,12 @@ async def listen_login_command(ack: AsyncAck, respond: AsyncRespond, command):
239239
async def listen_env_command(ack: AsyncAck, respond: AsyncRespond, command):
240240
command: SlashCommand = SlashCommand.model_validate(command)
241241
await ack()
242+
_manager = EnvManager(user_id=uid_make(__sender__, command.user_id))
242243
if not command.text:
243-
return
244+
env_map = await _manager.read_env()
245+
text = convert(dict2markdown(env_map))
246+
return await respond(text=text)
244247
_arg = command.text
245-
_manager = EnvManager(user_id=uid_make(__sender__, command.user_id))
246248
try:
247249
env_map = await _manager.set_env(
248250
env_value=_arg, update=True, return_all=True
@@ -251,11 +253,7 @@ async def listen_env_command(ack: AsyncAck, respond: AsyncRespond, command):
251253
logger.exception(f"[213562]env update failed {e}")
252254
text = formatting.mbold("🧊 Failed")
253255
else:
254-
text = formatting.format_text(
255-
formatting.mbold("🦴 Env Changed"),
256-
formatting.mcode(json.dumps(env_map, indent=2)),
257-
separator="\n",
258-
)
256+
text = convert(dict2markdown(env_map))
259257
await respond(text=text)
260258

261259
@bot.command(command="/clear")

app/sender/telegram/__init__.py

+9-8
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,6 @@
33
# @Author : sudoskys
44
# @File : __init__.py.py
55
# @Software: PyCharm
6-
import json
76
from typing import Optional, Union, List
87

98
from loguru import logger
@@ -22,6 +21,7 @@
2221
uid_make,
2322
login,
2423
TimerObjectContainer,
24+
dict2markdown,
2525
)
2626
from app.setting.telegram import BotSetting
2727
from llmkira.kv_manager.env import EnvManager
@@ -239,9 +239,14 @@ async def listen_login_command(message: types.Message):
239239
@bot.message_handler(commands="env", chat_types=["private"])
240240
async def listen_env_command(message: types.Message):
241241
_cmd, _arg = parse_command(command=message.text)
242-
if not _arg:
243-
return None
244242
_manager = EnvManager(user_id=uid_make(__sender__, message.from_user.id))
243+
if not _arg:
244+
env_map = await _manager.read_env()
245+
return await bot.reply_to(
246+
message,
247+
text=convert(dict2markdown(env_map)),
248+
parse_mode="MarkdownV2",
249+
)
245250
try:
246251
env_map = await _manager.set_env(
247252
env_value=_arg, update=True, return_all=True
@@ -252,11 +257,7 @@ async def listen_env_command(message: types.Message):
252257
formatting.mbold("🧊 Failed"), separator="\n"
253258
)
254259
else:
255-
text = formatting.format_text(
256-
formatting.mbold("🦴 Env Changed"),
257-
formatting.mcode(json.dumps(env_map, indent=2)),
258-
separator="\n",
259-
)
260+
text = convert(dict2markdown(env_map))
260261
await bot.reply_to(message, text=text, parse_mode="MarkdownV2")
261262

262263
@bot.message_handler(

app/sender/util_func.py

+7
Original file line numberDiff line numberDiff line change
@@ -223,3 +223,10 @@ def clear_objects(self, user_id):
223223
"""
224224
if user_id in self.users:
225225
self.users[user_id] = {}
226+
227+
228+
def dict2markdown(maps: dict, title: str = "🦴 Env") -> str:
    """Render a flat mapping as a markdown bullet list under a bold title.

    Args:
        maps: mapping of keys to values; both sides are interpolated with str().
        title: heading rendered in bold above the list. Defaults to the
            previously hard-coded "🦴 Env" so existing callers are unchanged.

    Returns:
        A markdown string: one bold title line, then one
        ``- **`key`**: `value` `` bullet per entry, each line newline-terminated.
    """
    lines = [f"**{title}**"]
    lines.extend(f"- **`{key}`**: `{value}`" for key, value in maps.items())
    # join + trailing newline reproduces the original "+=" loop output exactly
    return "\n".join(lines) + "\n"

llmkira/extra/plugins/search/__init__.py

+6-4
Original file line numberDiff line numberDiff line change
@@ -17,7 +17,7 @@
1717
from llmkira.sdk.tools.schema import FuncPair, BaseTool # noqa: E402
1818
from llmkira.task import Task, TaskHeader # noqa: E402
1919
from llmkira.task.schema import Location, ToolResponse, EventMessage # noqa: E402
20-
from .engine import SerperSearchEngine, build_search_tips # noqa: E402
20+
from .engine import SerperSearchEngine, build_search_tips, search_in_duckduckgo # noqa: E402
2121

2222

2323
class Search(BaseModel):
@@ -26,7 +26,9 @@ class Search(BaseModel):
2626

2727

2828
@resign_plugin_executor(tool=Search)
async def search_on_serper(search_sentence: str, api_key: str = None):
    """Search the web, preferring Serper when an API key is available.

    Args:
        search_sentence: the query string.
        api_key: Serper API key; when falsy, fall back to DuckDuckGo.

    Returns:
        Search tips built by build_search_tips from the engine results.
    """
    if not api_key:
        # BUG FIX: search_in_duckduckgo is a coroutine — the original returned
        # the un-awaited coroutine object, and also skipped build_search_tips,
        # so the keyless fallback path never produced usable output.
        results = await search_in_duckduckgo(search_sentence)
        return build_search_tips(search_items=results)
    result = await SerperSearchEngine(api_key=api_key).search(search_sentence)
    return build_search_tips(search_items=result)
3234

@@ -160,15 +162,15 @@ async def run(
160162
_set = Search.model_validate(arg)
161163
_search_result = await search_on_serper(
162164
search_sentence=_set.keywords,
163-
api_key=env.get("SERPER_API_KEY"),
165+
api_key=env.get("SERPER_API_KEY", None),
164166
)
165167
# META
166168
_meta = task.task_sign.reprocess(
167169
plugin_name=__plugin_name__,
168170
tool_response=[
169171
ToolResponse(
170172
name=__plugin_name__,
171-
function_response=str(_search_result),
173+
function_response=f"SearchData: {_search_result},Please give reference link when use it.",
172174
tool_call_id=pending_task.id,
173175
tool_call=pending_task,
174176
)

llmkira/extra/plugins/search/engine.py

+23
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,7 @@
22
from typing import List
33

44
import requests
5+
from duckduckgo_search import AsyncDDGS
56
from loguru import logger
67
from pydantic import BaseModel
78

@@ -46,6 +47,28 @@ async def search(self, search_term: str) -> List[SearchEngineResult]:
4647
return _result
4748

4849

50+
async def search_in_duckduckgo(search_sentence: str):
    """Fallback web search via DuckDuckGo (used when no SERPER_API_KEY is set).

    Args:
        search_sentence: the query string.

    Returns:
        A list of SearchEngineResult built from up to 10 DuckDuckGo text hits.

    Raises:
        ValueError: when the DuckDuckGo backend errors out or is unreachable.
    """
    try:
        search_result = await AsyncDDGS().text(
            search_sentence, safesearch="off", timelimit="y", max_results=10
        )
    except Exception as e:
        raise ValueError(
            f"Search Failed: DuckDuckGo Error now not available: {type(e)}"
        )
    else:
        # BUG FIX: duckduckgo_search returns lowercase keys ("href"), so the
        # original `result.get("Href", ...)` always produced "Undefined" links.
        return [
            SearchEngineResult(
                title=result.get("title", "Undefined"),
                link=result.get("href", "Undefined"),
                snippet=result.get("body", "Undefined"),
            )
            for result in search_result
        ]
70+
71+
4972
def build_search_tips(search_items: List[SearchEngineResult], limit=5):
5073
search_tips = []
5174
assert isinstance(

llmkira/extra/voice/__init__.py

+6-8
Original file line numberDiff line numberDiff line change
@@ -4,7 +4,7 @@
44
from typing import Optional
55

66
import aiohttp
7-
import edge_tts
7+
from gtts import gTTS
88
from loguru import logger
99

1010

@@ -99,17 +99,15 @@ async def request_reecho_speech(
9999
return None
100100

101101

102-
async def request_google_speech(text: str):
    """Synthesize `text` into MP3 bytes using Google TTS (gTTS).

    Args:
        text: the sentence to speak.

    Returns:
        The MP3 audio as bytes, or None if synthesis fails (failure is logged,
        never raised, matching the best-effort contract of the other
        request_* helpers in this module).
    """
    import asyncio  # local import keeps this fix self-contained

    try:
        byte_io = BytesIO()
        # gTTS performs a blocking HTTP request; run it in a worker thread so
        # this coroutine does not stall the event loop (the original version
        # called it inline inside an async def).
        tts = await asyncio.to_thread(gTTS, text)
        await asyncio.to_thread(tts.write_to_fp, byte_io)
        byte_io.seek(0)
        return byte_io.getvalue()
    except Exception as e:
        logger.warning(f"google TTS Error: {e}")
        return None
114112

115113

@@ -170,4 +168,4 @@ async def request_en(text) -> Optional[bytes]:
170168
if nai:
171169
return nai
172170
else:
173-
return await request_edge_speech(text)
171+
return await request_google_speech(text)

llmkira/logic/__init__.py

+55-3
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
from typing import Optional
1+
from typing import Optional, Type
22

33
from loguru import logger
44
from pydantic import BaseModel, Field, SecretStr
@@ -14,9 +14,13 @@ class whether(BaseModel):
1414

1515
yes_no: bool = Field(description="Whether the condition is true or false")
1616
comment_to_user: Optional[str] = Field(
17-
default="", description="Comment on the decision"
17+
default="", description="Comment on the decision in user language"
1818
)
1919

20+
@property
21+
def boolean(self):
22+
return self.yes_no
23+
2024

2125
class continue_act(BaseModel):
2226
"""
@@ -25,9 +29,13 @@ class continue_act(BaseModel):
2529

2630
continue_it: bool = Field(description="Whether to continue execution")
2731
comment_to_user: Optional[str] = Field(
28-
default="", description="Comment on the decision"
32+
default="", description="Comment on the decision in user language"
2933
)
3034

35+
@property
36+
def boolean(self):
37+
return self.continue_it
38+
3139

3240
class LLMLogic(object):
3341
"""
@@ -77,3 +85,47 @@ async def llm_continue(self, context: str, condition: str, default: bool):
7785
except Exception as e:
7886
logger.error(f"llm_continue error: {e}")
7987
return continue_act(continue_it=default)
88+
89+
async def deserialization(
    self, context: str, model: Type[BaseModel]
) -> Optional[BaseModel]:
    """Deserialize free-form text into an instance of `model`.

    Hands `context` to the configured LLM's structured-extraction endpoint
    and asks it to populate the given pydantic model.

    Args:
        context: raw text to parse.
        model: pydantic model class to extract into.

    Returns:
        The populated model instance, or None when extraction fails.
    """
    try:
        result = await OpenAI(
            model=self.api_model,
            messages=[UserMessage(content=context)],
        ).extract(
            response_model=model,
            session=OpenAICredential(
                api_key=SecretStr(self.api_key),
                base_url=self.api_endpoint,
                model=self.api_model,
            ),
        )
        return result
    except Exception as e:
        # BUG FIX: the original logged "logic:serialization error" here, making
        # failures of this method indistinguishable from serialization().
        logger.error(f"logic:deserialization error: {e}")
        return None
111+
112+
async def serialization(self, model: BaseModel) -> Optional[UserMessage]:
    """Turn a pydantic model into a UserMessage via LLM extraction.

    The model is dumped to JSON and passed to the configured LLM, which is
    asked to produce a UserMessage from it. Any failure is logged and
    swallowed; callers receive None instead of an exception.
    """
    try:
        # Everything stays inside the try so credential/dump errors are
        # also caught, as in the original implementation.
        credential = OpenAICredential(
            api_key=SecretStr(self.api_key),
            base_url=self.api_endpoint,
            model=self.api_model,
        )
        request = OpenAI(
            model=self.api_model,
            messages=[UserMessage(content=model.model_dump_json())],
        )
        return await request.extract(
            response_model=UserMessage,
            session=credential,
        )
    except Exception as e:
        logger.error(f"logic:serialization error: {e}")
        return None

0 commit comments

Comments
 (0)