
Commit 8d1ab34

Merge pull request #111 from cagostino/chris/hitl

Adding a human-in-the-loop request for input in check_llm_command.

2 parents: 1870445 + 68b6602

File tree

2 files changed: +81 −3 lines

npcsh/llm_funcs.py

Lines changed: 80 additions & 2 deletions
@@ -752,6 +752,7 @@ def check_llm_command(
     4. Would this question be best answered by an alternative NPC?
     5. Is it a complex request that actually requires more than one
        tool to be called, perhaps in a sequence?
+    6. Is there a need for the user to provide additional input to fulfill the request?
@@ -784,7 +785,7 @@ def check_llm_command(
     prompt += f"""
     In considering how to answer this, consider:
     - Whether it can be answered via a bash command on the user's computer. e.g. if a user is curious about file sizes within a directory or about processes running on their computer, these are likely best handled by a bash command.
-
+    - Whether more context from the user is required to adequately answer the question. e.g. if a user asks for a joke about their favorite city but they don't include the city, it would be helpful to ask for that information. Similarly, if a user asks to open a browser and to check the weather in a city, it would be helpful to ask for the city and which website or source to use.
     - Whether a tool should be used.

     Excluding time-sensitive phenomena,
@@ -799,7 +800,7 @@ def check_llm_command(
     ensure the best user experience.

     Respond with a JSON object containing:
-    - "action": one of ["execute_command", "invoke_tool", "answer_question", "pass_to_npc", "execute_sequence"]
+    - "action": one of ["execute_command", "invoke_tool", "answer_question", "pass_to_npc", "execute_sequence", "request_input"]
     - "tool_name": if action is "invoke_tool": the name of the tool to use.
       else if action is "execute_sequence", a list of tool names to use.
     - "explanation": a brief explanation of why you chose this action.
@@ -925,6 +926,46 @@ def check_llm_command(
             retrieved_docs=retrieved_docs,
             n_docs=n_docs,
         )
+    elif action == "request_input":
+        explanation = response_content_parsed.get("explanation")
+
+        request_input = handle_request_input(
+            f"Explanation from check_llm_command: {explanation} \n for the user input command: {command}",
+            model=model,
+            provider=provider,
+        )
+        # Pass the requested input back through by appending it to the end of
+        # the messages, so the result is re-routed through check_llm_command.
+
+        messages.append(
+            {
+                "role": "assistant",
+                "content": f"""It's clear that extra input is required.
+                Could you please provide it? Here is the reason:
+
+                {explanation},
+
+                and the prompt: {command}""",
+            }
+        )
+        messages.append(
+            {
+                "role": "user",
+                "content": command + " \n \n \n extra context: " + request_input,
+            }
+        )
+
+        return check_llm_command(
+            command + " \n \n \n extra context: " + request_input,
+            command_history,
+            model=model,
+            provider=provider,
+            npc=npc,
+            messages=messages,
+            retrieved_docs=retrieved_docs,
+            n_docs=n_docs,
+        )
+
     elif action == "execute_sequence":
         tool_names = response_content_parsed.get("tool_name")
         output = ""
@@ -1569,6 +1610,43 @@ def enter_reasoning_human_in_the_loop(
         return  # Stop the original stream in either case


+def handle_request_input(
+    context: str,
+    model: str = NPCSH_CHAT_MODEL,
+    provider: str = NPCSH_CHAT_PROVIDER,
+):
+    """
+    Analyze text and decide what to request from the user.
+    """
+    prompt = f"""
+    Analyze the text:
+    {context}
+    and determine what additional input is needed.
+    Return a JSON object with:
+    {{
+        "input_needed": boolean,
+        "request_reason": string explaining why input is needed,
+        "request_prompt": string to show the user if input is needed
+    }}
+
+    Do not include any additional markdown formatting or leading ```json tags. Your response
+    must be a valid JSON object.
+    """
+
+    response = get_llm_response(
+        prompt, model=model, provider=provider, messages=[], format="json"
+    )
+
+    result = response.get("response", {})
+    if isinstance(result, str):
+        result = json.loads(result)
+
+    user_input = request_user_input(
+        {"reason": result["request_reason"], "prompt": result["request_prompt"]}
+    )
+    return user_input
+
+
 def analyze_thoughts_for_input(
     thought_text: str,
     model: str = NPCSH_CHAT_MODEL,
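
Two things worth noting: handle_request_input asks the model for an input_needed boolean but never checks it before prompting the user, and request_user_input itself is not part of this diff. A minimal console implementation compatible with the call above could look like this; its contract is an assumption inferred only from the dict passed at the call site:

```python
# Hypothetical sketch of request_user_input, inferred from its call site in
# handle_request_input above; the real helper is not shown in this diff.
def request_user_input(request: dict) -> str:
    """Show the model's reason and prompt, then block for console input."""
    print(f"[input needed] {request['reason']}")
    return input(f"{request['prompt']} ")
```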

setup.py

Lines changed: 1 addition & 1 deletion
@@ -57,7 +57,7 @@ def get_setup_message():

 setup(
     name="npcsh",
-    version="0.3.11",
+    version="0.3.12",
     packages=find_packages(exclude=["tests*"]),
     install_requires=[
         "redis",
