diff --git a/CHANGELOG.md b/CHANGELOG.md
index f17416c..0354e57 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,21 @@
 # Changelog
 
+## 2025-01-25
+
+### New Features
+- DeepSeek LLM Support!
+- Enso skills now use the CDP wallet
+- Add an API for the frontend to link a Twitter account to an agent
+
+## 2025-01-24
+
+### Improvements
+- Refactor Telegram services
+- Save Telegram user info to the DB when it is linked to an agent
+
+### Bug Fixes
+- Fix bug where some skills stopped working after a Twitter token refresh
+
 ## 2025-01-23
 
 ### Features
diff --git a/README.md b/README.md
index 71f9799..9f51b86 100644
--- a/README.md
+++ b/README.md
@@ -131,6 +131,10 @@ python -m app.autonomous
 
 "Create Agent" and "Try it out" refer to the Docker section.
 
+## Models
+For now, we only support models from OpenAI and DeepSeek.
+We will support more models in the future.
+
 ## Integrations
 
 ### Twitter
diff --git a/app/config/config.py b/app/config/config.py
index 83f67ff..ab93ded 100644
--- a/app/config/config.py
+++ b/app/config/config.py
@@ -79,6 +79,7 @@ def __init__(self):
         self.cdp_api_key_private_key = self.load("CDP_API_KEY_PRIVATE_KEY")
         # AI
         self.openai_api_key = self.load("OPENAI_API_KEY")
+        self.deepseek_api_key = self.load("DEEPSEEK_API_KEY")
         self.system_prompt = self.load("SYSTEM_PROMPT")
         # Autonomous
         # self.autonomous_entrypoint_interval = int(
diff --git a/app/core/engine.py b/app/core/engine.py
index b4d0189..78c540e 100644
--- a/app/core/engine.py
+++ b/app/core/engine.py
@@ -121,7 +121,15 @@ def initialize_agent(aid):
         raise HTTPException(status_code=500, detail=str(e))
 
     # ==== Initialize LLM.
-    llm = ChatOpenAI(model_name=agent.model, openai_api_key=config.openai_api_key)
+    # TODO: model name whitelist
+    if agent.model.startswith("deepseek"):
+        llm = ChatOpenAI(
+            model_name=agent.model,
+            openai_api_key=config.deepseek_api_key,
+            openai_api_base="https://api.deepseek.com",
+        )
+    else:
+        llm = ChatOpenAI(model_name=agent.model, openai_api_key=config.openai_api_key)
 
     # ==== Store buffered conversation history in memory.
     memory = PostgresSaver(get_coon())
@@ -258,8 +266,12 @@ def initialize_agent(aid):
         ("placeholder", "{messages}"),
     ]
     if twitter_prompt:
-        prompt_array.append(("system", twitter_prompt))
-    if agent.prompt_append:
+        # DeepSeek only supports system prompts at the beginning
+        if agent.model.startswith("deepseek"):
+            prompt_array.insert(0, ("system", twitter_prompt))
+        else:
+            prompt_array.append(("system", twitter_prompt))
+    if agent.prompt_append and not agent.model.startswith("deepseek"):
         # Escape any curly braces in prompt_append
         escaped_append = agent.prompt_append.replace("{", "{{").replace("}", "}}")
         prompt_array.append(("system", escaped_append))
@@ -269,6 +281,10 @@ def formatted_prompt(state: AgentState):
         # logger.debug(f"[{aid}] formatted prompt: {state}")
         return prompt_temp.invoke({"messages": state["messages"]})
 
+    # hack for deepseek: deepseek-reasoner does not support skills yet
+    if agent.model == "deepseek-reasoner":
+        tools = []
+
     # Create ReAct Agent using the LLM and CDP Agentkit tools.
     agents[aid] = create_agent(
         llm,
diff --git a/app/core/graph.py b/app/core/graph.py
index 63a0674..84af613 100644
--- a/app/core/graph.py
+++ b/app/core/graph.py
@@ -261,6 +261,31 @@ class Agent,Tools otherClass
 def default_memory_manager(state: AgentState) -> AgentState:
     messages = state["messages"]
     # logger.debug("Before memory manager: %s", messages)
+
+    # Merge adjacent HumanMessages
+    i = 0
+    while i < len(messages) - 1:
+        if isinstance(messages[i], HumanMessage) and isinstance(
+            messages[i + 1], HumanMessage
+        ):
+            # Handle different content types
+            content1 = messages[i].content
+            content2 = messages[i + 1].content
+
+            # Convert to list if string
+            if isinstance(content1, str):
+                content1 = [content1]
+            if isinstance(content2, str):
+                content2 = [content2]
+
+            # Merge the contents
+            messages[i].content = content1 + content2
+
+            # Remove the second message
+            messages.pop(i + 1)
+        else:
+            i += 1
+
     if len(messages) <= 100:
         return state
     must_delete = len(messages) - 100
diff --git a/docs/create_agent.sh b/docs/create_agent.sh
index 3529d4a..d96a406 100755
--- a/docs/create_agent.sh
+++ b/docs/create_agent.sh
@@ -17,6 +17,8 @@ AGENT_NAME="IntentKit"
 
 # AI model to use
 # https://platform.openai.com/docs/models#current-model-aliases
+# You can also use "deepseek-reasoner" or "deepseek-chat"
+# Note: currently, deepseek-reasoner does not support any skills.
 MODEL="gpt-4o-mini"
 
 # Agent initial prompt (the role is system, daily user's role is user)
diff --git a/example.env b/example.env
index 28a284b..6704a0a 100644
--- a/example.env
+++ b/example.env
@@ -4,6 +4,8 @@ DEBUG_RESP=true
 
 OPENAI_API_KEY=
 
+DEEPSEEK_API_KEY=
+
 DB_HOST=
 DB_PORT=
 DB_USERNAME=
diff --git a/skills/cdp/tx.py b/skills/cdp/tx.py
index 34ba089..f4d07f2 100644
--- a/skills/cdp/tx.py
+++ b/skills/cdp/tx.py
@@ -183,9 +183,7 @@ class CdpBroadcastEnsoTx(CdpBaseTool):
     """
 
     name: str = "cdp_broadcast_tx"
-    description: str = (
-        "This tool broadcasts transaction using the calldata transaction body generated by the EnsoGetRouteShortcut tool which will be passed to you by user as a confirmation."
-    )
+    description: str = "This tool broadcasts transaction using the calldata transaction body generated by the EnsoGetRouteShortcut tool which will be passed to you by user as a confirmation."
     args_schema: Type[BaseModel] = CdpBroadcastEnsoTxInput
 
     def _run(self, txRef: str) -> CdpBroadcastEnsoTxOutput:
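
The LLM initialization in app/core/engine.py now routes any model name starting with "deepseek" through DeepSeek's OpenAI-compatible endpoint, using the new config.deepseek_api_key loaded from the DEEPSEEK_API_KEY variable added to example.env. A minimal standalone sketch of that routing, assuming ChatOpenAI is the langchain_openai client already used by engine.py and that the helper name build_llm is hypothetical:

    from langchain_openai import ChatOpenAI


    def build_llm(model_name: str, openai_api_key: str, deepseek_api_key: str) -> ChatOpenAI:
        """Pick the API key and base URL from the model name prefix."""
        if model_name.startswith("deepseek"):
            # DeepSeek exposes an OpenAI-compatible API, so the same client class
            # works with a different base URL and API key.
            return ChatOpenAI(
                model_name=model_name,
                openai_api_key=deepseek_api_key,
                openai_api_base="https://api.deepseek.com",
            )
        return ChatOpenAI(model_name=model_name, openai_api_key=openai_api_key)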
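
The prompt handling in initialize_agent also changes for DeepSeek: the Twitter system prompt is inserted at the front of the template instead of appended, and prompt_append is skipped, since DeepSeek only supports system prompts at the beginning of the conversation. An illustrative snippet of the resulting ordering, using hypothetical placeholder strings rather than the real agent configuration:

    # Illustrative only; the real prompt_array is built inside initialize_agent.
    base = [("system", "base system prompt"), ("placeholder", "{messages}")]
    twitter_prompt = "twitter-specific system prompt"

    openai_style = base + [("system", twitter_prompt)]    # appended after the placeholder
    deepseek_style = [("system", twitter_prompt)] + base  # inserted at index 0

    assert deepseek_style[0] == ("system", twitter_prompt)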
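
The new loop in default_memory_manager (app/core/graph.py) collapses runs of consecutive HumanMessages into a single message before the history is trimmed, presumably because DeepSeek rejects consecutive messages with the same role. A rough standalone equivalent of that loop, with the hypothetical helper name merge_adjacent_human_messages:

    from langchain_core.messages import AIMessage, BaseMessage, HumanMessage


    def merge_adjacent_human_messages(messages: list[BaseMessage]) -> list[BaseMessage]:
        """Collapse consecutive HumanMessages in place, like default_memory_manager."""
        i = 0
        while i < len(messages) - 1:
            if isinstance(messages[i], HumanMessage) and isinstance(messages[i + 1], HumanMessage):
                content1 = messages[i].content
                content2 = messages[i + 1].content
                # Content can be a plain string or a list of blocks; normalize to lists.
                if isinstance(content1, str):
                    content1 = [content1]
                if isinstance(content2, str):
                    content2 = [content2]
                messages[i].content = content1 + content2
                messages.pop(i + 1)  # drop the message that was merged in
            else:
                i += 1
        return messages


    merged = merge_adjacent_human_messages(
        [HumanMessage("hi"), HumanMessage("still there?"), AIMessage("yes")]
    )
    assert len(merged) == 2
    assert merged[0].content == ["hi", "still there?"]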