
Commit 1870445

Merge pull request #110 from cagostino/chris/hitl
Adds a human-in-the-loop capability, specifically for conversations with reasoning models.
2 parents b7da76d + d3ee1b5 commit 1870445
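
In short: enter_reasoning_human_in_the_loop (npcsh/llm_funcs.py) streams a reasoning model's <think> block, analyze_thoughts_for_input asks a chat model whether the thoughts reveal a need for clarification, and request_user_input collects that clarification before the loop restarts with answer_only=True. A runnable example lives in examples/deep_think_check.py, get_deepseek_stream stops hard-coding "deepseek-chat" so reasoning models can actually be selected, and the version is bumped to 0.3.11.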

File tree

4 files changed: +222 −3 lines

examples/deep_think_check.py

Lines changed: 23 additions & 0 deletions
@@ -0,0 +1,23 @@
+from typing import Generator, Dict, List, Any, Optional
+import re
+
+from npcsh.llm_funcs import enter_reasoning_human_in_the_loop
+
+if __name__ == "__main__":
+    # Example usage
+    messages = [
+        {
+            "role": "user",
+            "content": "Tell me a joke about my favorite animal and my favorite city",
+        },
+    ]
+
+    for chunk in enter_reasoning_human_in_the_loop(
+        messages, "deepseek-reasoner", "deepseek"
+    ):
+        chunk_content = "".join(
+            choice.delta.content
+            for choice in chunk.choices
+            if choice.delta.content is not None
+        )
+        print(chunk_content, end="")
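
The example consumes OpenAI-style chunk objects because it targets the "deepseek" provider. If you run a local model through Ollama instead, the yielded chunks are plain dicts (per the provider branch in enter_reasoning_human_in_the_loop). A minimal sketch, where "deepseek-r1" is a placeholder for whatever reasoning model you have pulled locally:

    from npcsh.llm_funcs import enter_reasoning_human_in_the_loop

    messages = [{"role": "user", "content": "Help me plan a dinner party"}]

    # "deepseek-r1" is a hypothetical model name; substitute your own.
    for chunk in enter_reasoning_human_in_the_loop(messages, "deepseek-r1", "ollama"):
        # Ollama chunks are dicts shaped like {"message": {"content": "..."}}.
        print(chunk.get("message", {}).get("content", ""), end="")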

npcsh/llm_funcs.py

Lines changed: 197 additions & 1 deletion
@@ -810,7 +810,7 @@ def check_llm_command(

     The format of the JSON object is:
     {{
-        "action": "execute_command" | "invoke_tool" | "answer_question" | "pass_to_npc" | "execute_sequence",
+        "action": "execute_command" | "invoke_tool" | "answer_question" | "pass_to_npc" | "execute_sequence" | "request_input",
         "tool_name": "<tool_name(s)_if_applicable>",
         "explanation": "<your_explanation>",
         "npc_name": "<npc_name_if_applicable>"

@@ -1438,3 +1438,199 @@ def get_data_response(
         failures.append(str(e))

     return {"response": "Max retries exceeded", "code": 400}
+
+
+def enter_reasoning_human_in_the_loop(
+    messages: List[Dict[str, str]],
+    reasoning_model: str = NPCSH_REASONING_MODEL,
+    reasoning_provider: str = NPCSH_REASONING_PROVIDER,
+    chat_model: str = NPCSH_CHAT_MODEL,
+    chat_provider: str = NPCSH_CHAT_PROVIDER,
+    npc: Any = None,
+    answer_only: bool = False,
+) -> Generator[str, None, None]:
+    """
+    Stream responses while checking for think tokens and handling human input when needed.
+
+    Args:
+        messages: List of conversation messages
+        reasoning_model: Reasoning LLM model to use
+        reasoning_provider: Provider of the reasoning model
+        chat_model: Chat model used to analyze thoughts
+        chat_provider: Provider of the chat model
+        npc: NPC instance if applicable
+        answer_only: Skip thinking and answer directly
+
+    Yields:
+        Streamed response chunks
+    """
+    # Get the initial stream
+    if answer_only:
+        messages[-1]["content"] = (
+            messages[-1]["content"].replace(
+                "Think first though and use <think> tags", ""
+            )
+            + " Do not think just answer. "
+        )
+    else:
+        messages[-1]["content"] = (
+            messages[-1]["content"] + " Think first though and use <think> tags. "
+        )
+
+    response_stream = get_stream(
+        messages, model=reasoning_model, provider=reasoning_provider, npc=npc
+    )
+
+    thoughts = []
+    response_chunks = []
+    in_think_block = False
+
+    for chunk in response_stream:
+        # Extract content based on provider/model type
+        if reasoning_provider == "ollama":
+            chunk_content = chunk.get("message", {}).get("content", "")
+        elif reasoning_provider == "openai" or reasoning_provider == "deepseek":
+            chunk_content = "".join(
+                choice.delta.content
+                for choice in chunk.choices
+                if choice.delta.content is not None
+            )
+        elif reasoning_provider == "anthropic":
+            if chunk.type == "content_block_delta":
+                chunk_content = chunk.delta.text
+            else:
+                chunk_content = ""
+        else:
+            # Default extraction
+            chunk_content = str(chunk)
+
+        # Accumulate all content so think-block state can be tracked
+        response_chunks.append(chunk_content)
+        if answer_only:
+            yield chunk
+        else:
+            if "<th" in "".join(response_chunks) and "/th" not in "".join(
+                response_chunks
+            ):
+                in_think_block = True
+
+            if in_think_block:
+                thoughts.append(chunk_content)
+                yield chunk  # Show the thoughts as they come
+
+            if "</th" in "".join(response_chunks):
+                thought_text = "".join(thoughts)
+                # Analyze thoughts before stopping
+                input_needed = analyze_thoughts_for_input(
+                    thought_text, model=chat_model, provider=chat_provider
+                )
+
+                if input_needed:
+                    # If input needed, get it and restart with new context
+                    user_input = request_user_input(input_needed)
+
+                    messages.append(
+                        {
+                            "role": "assistant",
+                            "content": f"""it's clear that extra input is required.
+                            could you please provide it? Here is the reason:
+
+                            {input_needed['reason']},
+
+                            and the prompt: {input_needed['prompt']}""",
+                        }
+                    )
+
+                    messages.append({"role": "user", "content": user_input})
+                    yield from enter_reasoning_human_in_the_loop(
+                        messages,
+                        reasoning_model=reasoning_model,
+                        reasoning_provider=reasoning_provider,
+                        chat_model=chat_model,
+                        chat_provider=chat_provider,
+                        npc=npc,
+                        answer_only=True,
+                    )
+                else:
+                    # If no input needed, just get the answer
+                    messages.append({"role": "assistant", "content": thought_text})
+                    messages.append(
+                        {"role": "user", "content": messages[-2]["content"]}
+                    )
+                    yield from enter_reasoning_human_in_the_loop(  # Restart with new context
+                        messages,
+                        reasoning_model=reasoning_model,
+                        reasoning_provider=reasoning_provider,
+                        chat_model=chat_model,
+                        chat_provider=chat_provider,
+                        npc=npc,
+                        answer_only=True,
+                    )
+
+                return  # Stop the original stream in either case
+
+
+def analyze_thoughts_for_input(
+    thought_text: str,
+    model: str = NPCSH_CHAT_MODEL,
+    provider: str = NPCSH_CHAT_PROVIDER,
+) -> Optional[Dict[str, str]]:
+    """
+    Analyze accumulated thoughts to determine if user input is needed.
+
+    Args:
+        thought_text: Accumulated text from think block
+        model: Chat model used for the analysis
+        provider: Provider of the chat model
+
+    Returns:
+        Dict with input request details if needed, None otherwise
+    """
+
+    prompt = (
+        f"""
+        Analyze these thoughts:
+        {thought_text}
+        and determine if additional user input would be helpful.
+        Return a JSON object with:"""
+        + """
+        {
+            "input_needed": boolean,
+            "request_reason": string explaining why input is needed,
+            "request_prompt": string to show user if input needed
+        }
+        Consider things like:
+        - Ambiguity in the user's request
+        - Missing context that would help provide a better response
+        - Clarification needed about user preferences/requirements
+        Only request input if it would meaningfully improve the response.
+        Do not include any additional markdown formatting or leading ```json tags.
+        Your response must be a valid JSON object.
+        """
+    )
+
+    response = get_llm_response(
+        prompt, model=model, provider=provider, messages=[], format="json"
+    )
+
+    result = response.get("response", {})
+    if isinstance(result, str):
+        result = json.loads(result)
+
+    if result.get("input_needed"):
+        return {
+            "reason": result["request_reason"],
+            "prompt": result["request_prompt"],
+        }
+
+
+def request_user_input(input_request: Dict[str, str]) -> str:
+    """
+    Request and get input from user.
+
+    Args:
+        input_request: Dict with reason and prompt for input
+
+    Returns:
+        User's input text
+    """
+    print(f"\nAdditional input needed: {input_request['reason']}")
+    return input(f"{input_request['prompt']}: ")
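
To make the contract concrete, here is a hypothetical chat-model reply that satisfies the prompt in analyze_thoughts_for_input, and what the function would return for it. The keys come from the prompt above; the values are invented for illustration:

    import json

    # Hypothetical model reply; values are invented for illustration.
    raw = (
        '{"input_needed": true, '
        '"request_reason": "The user\'s favorite animal and city are unknown.", '
        '"request_prompt": "What are your favorite animal and city?"}'
    )
    result = json.loads(raw)

    # analyze_thoughts_for_input would then return:
    #   {"reason": result["request_reason"], "prompt": result["request_prompt"]}
    # and request_user_input prints the reason, then reads the answer from stdin.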

npcsh/stream.py

Lines changed: 1 addition & 1 deletion
@@ -579,7 +579,7 @@ def get_deepseek_stream(
     messages_copy.insert(0, {"role": "system", "content": system_message})

     completion = client.chat.completions.create(
-        model="deepseek-chat",
+        model=model,
         messages=messages,
         tools=tools,
         stream=True,
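
This one-line fix is what lets the example above actually reach "deepseek-reasoner": get_deepseek_stream previously hard-coded "deepseek-chat", so any other DeepSeek model silently fell back to the chat model. A minimal sketch of the corrected call, assuming the OpenAI-compatible DeepSeek client the function already uses (base_url and key are placeholders):

    from openai import OpenAI

    client = OpenAI(api_key="<DEEPSEEK_API_KEY>", base_url="https://api.deepseek.com")

    completion = client.chat.completions.create(
        model="deepseek-reasoner",  # now honored; previously forced to "deepseek-chat"
        messages=[{"role": "user", "content": "hello"}],
        stream=True,
    )
    for chunk in completion:
        print(chunk.choices[0].delta.content or "", end="")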

setup.py

Lines changed: 1 addition & 1 deletion
@@ -57,7 +57,7 @@ def get_setup_message():

 setup(
     name="npcsh",
-    version="0.3.10",
+    version="0.3.11",
     packages=find_packages(exclude=["tests*"]),
     install_requires=[
         "redis",
