# app.py
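# Flask backend for the AgentLaboratory WebUI: it serves the built
# AgentLaboratoryWebUI frontend and exposes API endpoints for settings,
# saved research states, WebUI updates, the task-note LLM configuration,
# and launching the research process (ai_lab_repo.py) in a new terminal.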
import json
import os
import subprocess
import sys
from flask import Flask, request, jsonify, render_template
from flask_cors import CORS
from settings_manager import SettingsManager
from config import TASK_NOTE_LLM
from utils import validate_task_note_config
# Check if the WebUI repository is cloned
def check_webui_cloned():
    if not os.path.exists("AgentLaboratoryWebUI"):
        # Ask the user if they want to clone the repository
        print("The WebUI repository is not cloned.")
        answer = input("Would you like to clone it now from https://github.com/whats2000/AgentLaboratoryWebUI.git? (y/n) ")
        if answer.lower() != "y":
            print("Error: The WebUI repository is not cloned. Please clone it manually.")
            sys.exit(1)
        print("Cloning the WebUI repository...")
        subprocess.run(["git", "clone", "https://github.com/whats2000/AgentLaboratoryWebUI.git"], check=True)


# Check if Node.js is installed
def check_node_installed():
    try:
        subprocess.run(["node", "--version"], check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    except FileNotFoundError:
        print("Error: Node.js is not installed. Please install it from https://nodejs.org/")
        sys.exit(1)


# Check if Yarn is installed
def check_yarn_installed():
    try:
        subprocess.run(["yarn", "--version"], check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    except FileNotFoundError:
        # Ask the user if they want to install Yarn
        print("Yarn is not installed.")
        answer = input("Would you like to install it now? (y/n) ")
        if answer.lower() != "y":
            print("Error: Yarn is not installed. Please install it manually.")
            sys.exit(1)
        print("Installing Yarn...")
        subprocess.run(["npm", "install", "-g", "yarn"], check=True)


# Build the WebUI
def build_webui():
    webui_path = os.path.join(os.getcwd(), "AgentLaboratoryWebUI")
    if not os.path.exists(os.path.join(webui_path, "dist")):
        print("Building the WebUI...")
        subprocess.run(["yarn", "install"], check=True, cwd=webui_path)
        subprocess.run(["yarn", "build"], check=True, cwd=webui_path)


# Run the checks and build the WebUI
check_webui_cloned()
check_node_installed()
check_yarn_installed()
build_webui()
# Initialize the Flask app
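# The built frontend (AgentLaboratoryWebUI/dist) is served both as the static
# root and as the template folder, so index.html can be rendered directly.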
app = Flask(
    __name__,
    static_url_path='',
    static_folder='AgentLaboratoryWebUI/dist',
    template_folder='AgentLaboratoryWebUI/dist'
)
CORS(app)
# Define default values
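# Fallback values for every supported setting; saved settings are merged over
# these in the GET /api/settings handler so all keys are always present.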
DEFAULT_SETTINGS = {
    "research_topic": "",
    "api_key": "",
    "deepseek_api_key": "",
    "google_api_key": "",
    "anthropic_api_key": "",
    "llm_backend": "o1-mini",
    "custom_llm_backend": "",
    "ollama_max_tokens": 2048,
    "language": "English",
    "copilot_mode": False,
    "compile_latex": True,
    "num_papers_lit_review": 5,
    "mlesolver_max_steps": 3,
    "papersolver_max_steps": 5,
}
settings_manager = SettingsManager()
def save_user_settings_from_dict(settings: dict):
    """Save settings using the SettingsManager."""
    settings_manager.save_settings(settings)


def load_user_settings():
    """Load settings using the SettingsManager."""
    return settings_manager.load_settings()


def get_existing_saves():
    """
    Retrieve list of existing save files from the 'state_saves' directory.
    """
    saves_dir = 'state_saves'
    try:
        os.makedirs(saves_dir, exist_ok=True)
        saves = [f for f in os.listdir(saves_dir) if f.endswith('.pkl')]
        return saves if saves else ["No saved states found"]
    except Exception as e:
        print(f"Error retrieving saves: {e}")
        return ["No saved states found"]
def run_research_process(data: dict) -> str:
    """
    Build the ai_lab_repo.py command from the provided settings and launch it
    in a new terminal window, returning a Markdown status message.
    """
    # Unpack parameters from the incoming JSON payload.
    research_topic = data.get('research_topic', '')
    api_key = data.get('api_key', '')
    llm_backend = data.get('llm_backend', 'o1-mini')
    custom_llm_backend = data.get('custom_llm_backend', '')
    ollama_max_tokens = data.get('ollama_max_tokens', 2048)
    language = data.get('language', 'English')
    copilot_mode = data.get('copilot_mode', False)
    compile_latex = data.get('compile_latex', True)
    num_papers_lit_review = data.get('num_papers_lit_review', 5)
    mlesolver_max_steps = data.get('mlesolver_max_steps', 3)
    papersolver_max_steps = data.get('papersolver_max_steps', 5)
    deepseek_api_key = data.get('deepseek_api_key', '')
    google_api_key = data.get('google_api_key', '')
    anthropic_api_key = data.get('anthropic_api_key', '')
    load_existing = data.get('load_existing', False)
    load_existing_path = data.get('load_existing_path', '')

    # Choose the backend: an API key of "ollama" selects the custom model string if one is given.
    if api_key.strip().lower() == "ollama":
        chosen_backend = custom_llm_backend.strip() if custom_llm_backend.strip() else llm_backend
    else:
        chosen_backend = llm_backend

    # Prepare the command arguments.
    cmd = [
        sys.executable, 'ai_lab_repo.py',
        '--research-topic', research_topic,
        '--llm-backend', chosen_backend,
        '--language', language,
        '--copilot-mode', str(copilot_mode).lower(),
        '--compile-latex', str(compile_latex).lower(),
        '--num-papers-lit-review', str(num_papers_lit_review),
        '--mlesolver-max-steps', str(mlesolver_max_steps),
        '--papersolver-max-steps', str(papersolver_max_steps)
    ]

    # Append optional API keys if provided.
    if api_key:
        cmd.extend(['--api-key', api_key])
    if deepseek_api_key:
        cmd.extend(['--deepseek-api-key', deepseek_api_key])
    if google_api_key:
        cmd.extend(['--google-api-key', google_api_key])
    if anthropic_api_key:
        cmd.extend(['--anthropic-api-key', anthropic_api_key])

    # Require at least one valid API key.
    if not (api_key or deepseek_api_key or google_api_key or anthropic_api_key):
        return "**Error starting research process:** No valid API key provided. At least one API key is required."

    # Handle Ollama-specific requirements.
    if api_key.strip().lower() == "ollama":
        if not custom_llm_backend.strip():
            return "**Error starting research process:** Custom LLM Backend is required for Ollama. Enter a custom model string or select a standard model."
        if not ollama_max_tokens:
            return "**Error starting research process:** Custom Max Tokens for Ollama is required. Enter a valid integer value."
        cmd.extend(['--ollama-max-tokens', str(int(ollama_max_tokens))])

    # If loading an existing research state, add the flags.
    if load_existing and load_existing_path and load_existing_path != "No saved states found":
        cmd.extend([
            '--load-existing', 'True',
            '--load-existing-path', os.path.join('state_saves', load_existing_path)
        ])

    # Append the task note config if the config file exists.
    if os.path.exists('settings/task_note_llm_config.json'):
        cmd.extend(['--task-note-llm-config-file', 'settings/task_note_llm_config.json'])

    # Create a displayable command string, quoting values so topics and paths with spaces stay readable.
    command_str = ' '.join(
        [arg if (arg == sys.executable or arg == "ai_lab_repo.py" or arg.startswith("--"))
         else f'"{arg}"'
         for arg in cmd]
    )
    markdown_status = f"**Command created:**\n```\n{command_str}\n```\n"

    # Attempt to open a new terminal window and run the command.
    try:
        if sys.platform == 'win32':
            # Windows: open a new cmd window that stays open after the command finishes (/k).
            subprocess.Popen(['start', 'cmd', '/k'] + cmd, shell=True)
        elif sys.platform == 'darwin':
            # macOS: launch the command via Terminal.app.
            subprocess.Popen(['open', '-a', 'Terminal'] + cmd)
        else:
            # Linux and other platforms: use the default terminal emulator.
            subprocess.Popen(['x-terminal-emulator', '-e'] + cmd)
        markdown_status += "\n**Research process started in a new terminal window.**"
    except Exception as e:
        markdown_status += f"\n**Error starting research process:** {e}"
    return markdown_status
# Update the WebUI repository
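# Called by the /api/updateWebUI endpoint below.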
def update_webui():
    """Pull the latest changes from the WebUI repository and rebuild if needed."""
    webui_path = os.path.join(os.getcwd(), "AgentLaboratoryWebUI")
    try:
        # Pull the latest changes from the remote
        print("Checking for WebUI updates...")
        result = subprocess.run(
            ["git", "pull"],
            check=True,
            cwd=webui_path,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            text=True
        )

        # If the output contains "Already up to date", no rebuild is needed
        if "Already up to date" in result.stdout:
            return {"status": "WebUI is already up to date", "updated": False}

        # Changes were pulled, so rebuild the UI
        print("Rebuilding WebUI after update...")
        subprocess.run(["yarn", "install"], check=True, cwd=webui_path)
        subprocess.run(["yarn", "build"], check=True, cwd=webui_path)
        return {"status": "WebUI has been updated and rebuilt successfully", "updated": True}
    except subprocess.CalledProcessError as e:
        error_message = f"Error updating WebUI: {e.stderr}"
        print(error_message)
        return {"status": error_message, "updated": False, "error": True}
    except Exception as e:
        error_message = f"Unexpected error updating WebUI: {str(e)}"
        print(error_message)
        return {"status": error_message, "updated": False, "error": True}
@app.route("/")
@app.route("/config")
@app.route("/monitor")
def hello():
return render_template("index.html")
# Endpoint to start the research process.
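# The JSON body uses the same keys as DEFAULT_SETTINGS, plus the optional
# load_existing / load_existing_path fields handled in run_research_process.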
@app.route('/api/research', methods=['POST'])
def api_research():
    data = request.get_json()
    result = run_research_process(data)
    return jsonify({"status": result})


# Endpoint to load or update settings.
@app.route('/api/settings', methods=['GET', 'POST'])
def api_settings():
    if request.method == 'GET':
        settings = load_user_settings()
        # Merge with defaults to ensure all keys are present.
        merged_settings = DEFAULT_SETTINGS.copy()
        merged_settings.update(settings or {})
        return jsonify(merged_settings)
    elif request.method == 'POST':
        settings = request.get_json()
        save_user_settings_from_dict(settings)
        return jsonify({"status": "Settings saved"})


# Endpoint to retrieve saved research states.
@app.route('/api/saves', methods=['GET'])
def api_saves():
    saves = get_existing_saves()
    return jsonify({"saves": saves})


# Endpoint to update the WebUI
@app.route('/api/updateWebUI', methods=['POST'])
def api_update_webui():
    result = update_webui()
    return jsonify(result)
# Endpoint to manage the task note LLM configuration
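# GET returns the saved config (or the TASK_NOTE_LLM default from config.py);
# POST validates the payload and writes settings/task_note_llm_config.json.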
@app.route('/api/task_note_config', methods=['GET', 'POST'])
def api_task_note_config():
    if request.method == 'GET':
        config_file = os.path.join('settings', 'task_note_llm_config.json')
        if os.path.exists(config_file):
            try:
                with open(config_file, 'r') as f:
                    config = json.load(f)
                return jsonify(config)
            except Exception as e:
                return jsonify({
                    "status": "error",
                    "message": f"Error loading task note config: {str(e)}"
                }), 500
        else:
            # Return default task note config from config.py
            return jsonify(TASK_NOTE_LLM)
    elif request.method == 'POST':
        config = request.get_json()
        # Validate configuration before saving
        if not validate_task_note_config(config):
            return jsonify({
                "status": "error",
                "message": "Invalid task note LLM configuration format"
            }), 400
        try:
            os.makedirs('settings', exist_ok=True)
            config_file = os.path.join('settings', 'task_note_llm_config.json')
            # Save as task note JSON format
            with open(config_file, 'w') as f:
                json.dump(config, f, indent=2)
            return jsonify({
                "status": "success",
                "message": "Task note LLM config saved successfully and set as current config"
            })
        except Exception as e:
            return jsonify({
                "status": "error",
                "message": f"Error saving task note config: {str(e)}"
            }), 500
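# Running `python app.py` performs the setup steps above (clone/build the WebUI,
# verify Node.js and Yarn) and then serves the app at http://localhost:5000.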
if __name__ == '__main__':
    app.run(debug=True, host='0.0.0.0', port=5000)