Skip to content

Commit e140ff0

Browse files
committed
updating readme, some llm functions and the observation mode to be more of a data mode
1 parent 99eff58 commit e140ff0

File tree

5 files changed

+158
-45
lines changed

5 files changed

+158
-45
lines changed

README.md

Lines changed: 16 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -4,6 +4,15 @@ Welcome to npcsh, the shell for interacting with NPCs (LLM-powered AI agents). n
44

55
Additionally, npcsh introduces a new paradigm of programming for LLMs: npcsh allows users to set up NPC profiles (a la npc_profile.npc) where a user sets the primary directive of the NPC, the tools they want the NPC to use, and other properties of the NPC. NPCs can interact with each other and their primary directives and properties make these relationships explicit through jinja references.
66

7+
## Dependencies
8+
- ollama
9+
- python >3.10
10+
11+
The default model is currently phi3. The user can change the model by setting the environment variable `NPCSH_MODEL` to the desired model name and to change the provider by setting the environment variable `NPCSH_PROVIDER` to the desired provider name.
12+
13+
The provider must be one of ['ollama', 'openai', 'anthropic'] and the model must be one available from those providers.
14+
15+
716
## compilation
817

918
Each NPC can be compiled to accomplish their primary directive and then any issues faced will be recorded and associated with the NPC so that it can reference it later through vector search. In any of the modes where a user requests input from an NPC, the NPC will include RAG search results before carrying out the request.
@@ -35,15 +44,17 @@ The LLM or specific NPC will take the user's request and try to write a command
3544
Use the Command NPC by typing ```/cmd <command>```. Chat with the Command NPC in spool mode by typing ```/spool cmd```.
3645
Use the Command NPC in the profiles of other NPCs by referencing it like ```{{cmd}}```.
3746

38-
### observation mode
47+
### Data NPC
48+
49+
Users can create schemas for recording observations and for exploring and analyzing data.
3950

40-
Users can create schemas for recording observations. The idea here is to more easily facilitate the recording of data for individuals in essentially any realm (e.g. recipe testing, one's own blood pressure or weight, books read, movies watched, daily mood, etc.) without needing to use a tangled web of applications to do so. Observations can be referenced by the generic npcsh LLM shell or by specific NPCs.
41-
Use the Observation NPC by typing ```/obs <observation>```.
51+
The Data NPC will easily facilitate the recording of data for individuals in essentially any realm (e.g. recipe testing, one's own blood pressure or weight, books read, movies watched, daily mood, etc.) without needing to use a tangled web of applications to do so. Observations can be referenced by the generic npcsh LLM shell or by specific NPCs.
52+
Use the Data NPC by typing ```/data <observation>```.
4253
Chat with the Observation NPC in spool mode by typing ```/spool obs```.
43-
Use the Observation NPC in the profiles of other NPCs by referencing it like ```{{obs}}```.
54+
Use the Observation NPC in the profiles of other NPCs by referencing it like ```{{obs}}```. Exit by typing ```/dq```.
4455

4556

46-
### question mode
57+
### Question NPC
4758

4859
The user can submit a 1-shot question to a general LLM or to a specific NPC.
4960
Use it like

npcsh/llm_funcs.py

Lines changed: 81 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -3,12 +3,15 @@
33
import os
44
import json
55
import ollama
6+
import sqlite3
67

7-
def get_ollama_conversion(messages, model):
8-
# print(messages)
8+
npcsh_model = os.environ.get("NPCSH_MODEL", "phi3")
9+
npcsh_provider = os.environ.get("NPCSH_PROVIDER", "ollama")
910

11+
12+
def get_ollama_conversation(messages, model):
    """Send a chat history to an Ollama model and append the assistant reply.

    Args:
        messages: list of ``{"role": ..., "content": ...}`` chat dicts;
            mutated in place by appending the model's reply.
        model: name of the Ollama model to query.

    Returns:
        The same ``messages`` list, now ending with the assistant message.
    """
    response = ollama.chat(model=model, messages=messages)
    # Append only the message dict, not the whole response envelope:
    # ollama.chat returns {"model": ..., "message": {...}, ...}, and feeding
    # that envelope back into ollama.chat on the next turn breaks the
    # role/content format the API expects.
    messages.append(response["message"])
    return messages
1417

@@ -62,7 +65,7 @@ def get_claude_response(prompt, model, format=None):
6265
pass
6366

6467

65-
def get_llm_response(prompt, provider="ollama", model="phi3", **kwargs):
68+
def get_llm_response(prompt, provider=npcsh_provider, model=npcsh_model, **kwargs):
6669
if provider == "ollama":
6770
return get_ollama_response(prompt, model, **kwargs)
6871
elif provider == "openai":
@@ -73,6 +76,80 @@ def get_llm_response(prompt, provider="ollama", model="phi3", **kwargs):
7376
return "Error: Invalid provider specified."
7477

7578

79+
def _run_script(interpreter, script):
    """Run *script* with *interpreter* (e.g. "python", "Rscript") and print the result.

    Writes the script to a temporary file and invokes the interpreter on it,
    avoiding the quoting/injection problems of ``echo '{script}' | engine``
    with ``shell=True`` (any single quote in the script broke that pipeline).
    """
    # Local imports: the module's top-of-file imports are outside this block
    # and do not include subprocess/tempfile.
    import subprocess
    import tempfile

    with tempfile.NamedTemporaryFile("w", delete=False) as tmp:
        tmp.write(script)
        script_path = tmp.name
    try:
        result = subprocess.run(
            [interpreter, script_path],  # list form: no shell, no quoting issues
            text=True,
            capture_output=True,
        )
        if result.returncode == 0:
            print(result.stdout)
        else:
            print(f"Error executing script: {result.stderr}")
    except Exception as e:
        print(f"Error executing script: {e}")
    finally:
        os.unlink(script_path)


def execute_data_operations(query, command_history):
    """Translate a plain-text *query* into SQL/Python/R via the LLM and execute it.

    Args:
        query: the user's plain-text data request.
        command_history: history object exposing
            ``.add(command, subcommands, output, location)``.

    Returns:
        The parsed LLM response dict, expected to look like
        ``{"data_operation": "<script>", "engine": "SQL"|"PYTHON"|"R"}``.
    """
    location = os.getcwd()
    prompt = f"""
    A user submitted this query: {query}
    You need to generate a script using python, R, or SQL that will accomplish the user's intent.

    Respond ONLY with the procedure that should be executed.

    Here are some examples:
    {{"data_operation": "<sql query>", 'engine': 'SQL'}}
    {{'data_operation': '<python script>', 'engine': 'PYTHON'}}
    {{'data_operation': '<r script>', 'engine': 'R'}}

    You must reply with only ONE output.
    """

    response = get_llm_response(prompt, format="json")
    command_history.add(query, [], json.dumps(response), location)
    print(response)

    # .get() instead of [] — the LLM may omit either key; fall through to the
    # "invalid engine" message rather than raising KeyError.
    engine = response.get("engine")
    operation = response.get("data_operation", "")

    if engine == "SQL":
        db_path = os.path.expanduser("~/.npcsh_history.db")
        try:
            print(f"Executing query in SQLite database: {operation}")
            with sqlite3.connect(db_path) as conn:
                cursor = conn.cursor()
                cursor.execute(operation)
                for row in cursor.fetchall():
                    print(row)
        except sqlite3.Error as e:
            print(f"SQLite error: {e}")
        except Exception as e:
            print(f"Error executing query: {e}")
    elif engine == "PYTHON":
        _run_script("python", operation)
    elif engine == "R":
        _run_script("Rscript", operation)
    else:
        print("Error: Invalid engine specified.")

    return response
151+
152+
76153
def execute_llm_command(command, command_history):
77154
max_attempts = 5
78155
attempt = 0

npcsh/modes.py

Lines changed: 51 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,11 @@
11
# modes.py
22
import os
33
import subprocess
4-
from .llm_funcs import get_ollama_conversion
4+
from .llm_funcs import (
5+
get_ollama_conversation,
6+
get_llm_response,
7+
execute_data_operations,
8+
)
59
import sqlite3
610

711

@@ -83,36 +87,60 @@ def save_note(note, command_history):
8387
# enter_notes_mode()
8488

8589

90+
def initial_table_print(cursor):
    """Print a numbered listing of the user-created tables.

    Shows every table in the connected SQLite database except the internal
    ``command_history`` table, one per line as ``<n>. <name>``.
    """
    listing_sql = (
        "SELECT name FROM sqlite_master "
        "WHERE type='table' AND name != 'command_history'"
    )
    cursor.execute(listing_sql)
    table_rows = cursor.fetchall()

    print("\nAvailable tables:")
    position = 1
    for row in table_rows:
        print(f"{position}. {row[0]}")
        position += 1
99+
100+
86101
def enter_observation_mode(command_history):
    """Interactive data mode: turn plain-text requests into data operations.

    Repeatedly prompts the user, hands each request to
    ``execute_data_operations``, then asks the LLM to phrase a final answer.
    The user exits by typing ``/dq``.

    Args:
        command_history: history object exposing ``.conn`` and ``.cursor``
            for the npcsh SQLite database; the connection is closed on exit.

    Returns:
        None.
    """
    conn = command_history.conn
    cursor = command_history.cursor

    print("Entering observation mode. Type '/dq' to exit.")
    n_times = 0
    while True:
        # Show the table listing and the long prompt only on the first pass.
        if n_times == 0:
            initial_table_print(cursor)
            user_query = input(
                """
Enter a plain-text request or one using the dataframe manipulation framework of your choice.
You can also have the data NPC ingest data into your database by pointing it to the right files.
data>"""
            )
        else:
            user_query = input(
                """
data>"""
            )

        # Honor the advertised exit command — without this check the loop
        # never terminates and conn.close() below is unreachable.
        if user_query.strip() == "/dq":
            break

        response = execute_data_operations(user_query, command_history)

        answer_prompt = f"""

        Here is an input from the user:
        {user_query}
        Here is some useful data relevant to the query:
        {response}

        Now write a query to write a final response to be delivered to the user.

        Your answer must be in the format:
        {{"response": "Your response here."}}

        """
        final_response = get_llm_response(answer_prompt, format="json")
        print(final_response["response"])
        n_times += 1

    conn.close()
    print("Exiting observation mode.")
@@ -140,7 +168,7 @@ def enter_spool_mode(command_history, inherit_last=0, model="llama3.1"):
140168
spool_context.append({"role": "user", "content": user_input})
141169

142170
# Process the spool context with LLM
143-
spool_context = get_ollama_conversion(spool_context, model=model)
171+
spool_context = get_ollama_conversation(spool_context, model=model)
144172

145173
command_history.add(
146174
user_input, ["spool"], spool_context[-1]["content"], os.getcwd()

npcsh/npcsh.py

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -61,7 +61,7 @@ def execute_command(command, command_history, db_path, npc_compiler):
6161
output = enter_whisper_mode()
6262
elif command_name == "notes":
6363
output = enter_notes_mode(command_history)
64-
elif command_name == "obs":
64+
elif command_name == "data":
6565
print(db_path)
6666
output = enter_observation_mode(command_history)
6767
elif command_name == "cmd" or command_name == "command":
@@ -87,6 +87,9 @@ def execute_command(command, command_history, db_path, npc_compiler):
8787
def setup_readline():
8888
readline.set_history_length(1000)
8989
readline.parse_and_bind("set editing-mode vi")
90+
readline.parse_and_bind('"\e[A": history-search-backward')
91+
readline.parse_and_bind('"\e[B": history-search-forward')
92+
9093
readline.parse_and_bind('"\C-r": reverse-search-history')
9194

9295

setup.py

Lines changed: 6 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -2,20 +2,14 @@
22

33
setup(
44
name="npcsh",
5-
version="0.1.0",
5+
version="0.1.1",
66
packages=find_packages(exclude=["tests*"]),
7-
install_requires=[
8-
'jinja2',
9-
'pandas',
10-
'ollama',
11-
'requests',
12-
'PyYAML'
13-
],
7+
install_requires=["jinja2", "pandas", "ollama", "requests", "PyYAML"],
148
entry_points={
15-
'console_scripts': [
16-
'npcsh=npcsh.npcsh:main',
9+
"console_scripts": [
10+
"npcsh=npcsh.npcsh:main",
1711
],
18-
},
12+
},
1913
author="Christopher Agostino",
2014
author_email="cjp.agostino@example.com",
2115
description="A way to use npcsh",
@@ -27,4 +21,4 @@
2721
"License :: OSI Approved :: MIT License",
2822
],
2923
python_requires=">=3.10",
30-
)
24+
)

0 commit comments

Comments
 (0)