From f33e41a2b64d7ef80d60f5de61c8596a1ac339ad Mon Sep 17 00:00:00 2001
From: Rohan Mehta
Date: Tue, 3 Jun 2025 23:06:31 -0400
Subject: [PATCH] Add run_demo_loop REPL utility

---
 docs/ja/repl.md        | 22 ++++++++++++++
 docs/ref/repl.md       |  6 ++++
 docs/repl.md           | 19 ++++++++++++
 mkdocs.yml             |  3 ++
 src/agents/__init__.py |  2 ++
 src/agents/repl.py     | 65 ++++++++++++++++++++++++++++++++++++++++++
 tests/test_repl.py     | 28 ++++++++++++++++++
 7 files changed, 145 insertions(+)
 create mode 100644 docs/ja/repl.md
 create mode 100644 docs/ref/repl.md
 create mode 100644 docs/repl.md
 create mode 100644 src/agents/repl.py
 create mode 100644 tests/test_repl.py

diff --git a/docs/ja/repl.md b/docs/ja/repl.md
new file mode 100644
index 000000000..108c12b90
--- /dev/null
+++ b/docs/ja/repl.md
@@ -0,0 +1,22 @@
+---
+search:
+  exclude: true
+---
+# REPL ユーティリティ
+
+`run_demo_loop` を使うと、ターミナルから手軽にエージェントを試せます。
+
+```python
+import asyncio
+from agents import Agent, run_demo_loop
+
+async def main() -> None:
+    agent = Agent(name="Assistant", instructions="あなたは親切なアシスタントです")
+    await run_demo_loop(agent)
+
+if __name__ == "__main__":
+    asyncio.run(main())
+```
+
+`run_demo_loop` は入力を繰り返し受け取り、会話履歴を保持したままエージェントを実行します。既定ではストリーミング出力を表示します。
+`quit` または `exit` と入力するか `Ctrl-D` を押すと終了します。
diff --git a/docs/ref/repl.md b/docs/ref/repl.md
new file mode 100644
index 000000000..a064a9bff
--- /dev/null
+++ b/docs/ref/repl.md
@@ -0,0 +1,6 @@
+# `repl`
+
+::: agents.repl
+    options:
+        members:
+            - run_demo_loop
diff --git a/docs/repl.md b/docs/repl.md
new file mode 100644
index 000000000..073b87f51
--- /dev/null
+++ b/docs/repl.md
@@ -0,0 +1,19 @@
+# REPL utility
+
+The SDK provides `run_demo_loop` for quick interactive testing.
+
+```python
+import asyncio
+from agents import Agent, run_demo_loop
+
+async def main() -> None:
+    agent = Agent(name="Assistant", instructions="You are a helpful assistant.")
+    await run_demo_loop(agent)
+
+if __name__ == "__main__":
+    asyncio.run(main())
+```
+
+`run_demo_loop` prompts for user input in a loop, keeping the conversation
+history between turns. By default it streams model output as it is produced.
+Type `quit` or `exit` (or press `Ctrl-D`) to leave the loop.
diff --git a/mkdocs.yml b/mkdocs.yml
index ad719670c..a58258a93 100644
--- a/mkdocs.yml
+++ b/mkdocs.yml
@@ -59,6 +59,7 @@ plugins:
             - running_agents.md
             - results.md
             - streaming.md
+            - repl.md
             - tools.md
             - mcp.md
             - handoffs.md
@@ -80,6 +81,7 @@ plugins:
             - ref/index.md
             - ref/agent.md
             - ref/run.md
+            - ref/repl.md
             - ref/tool.md
             - ref/result.md
             - ref/stream_events.md
@@ -139,6 +141,7 @@ plugins:
             - running_agents.md
             - results.md
             - streaming.md
+            - repl.md
             - tools.md
             - mcp.md
             - handoffs.md
diff --git a/src/agents/__init__.py b/src/agents/__init__.py
index 820616437..ee2f2aa16 100644
--- a/src/agents/__init__.py
+++ b/src/agents/__init__.py
@@ -45,6 +45,7 @@
 from .models.openai_chatcompletions import OpenAIChatCompletionsModel
 from .models.openai_provider import OpenAIProvider
 from .models.openai_responses import OpenAIResponsesModel
+from .repl import run_demo_loop
 from .result import RunResult, RunResultStreaming
 from .run import RunConfig, Runner
 from .run_context import RunContextWrapper, TContext
@@ -160,6 +161,7 @@ def enable_verbose_stdout_logging():
     "ToolsToFinalOutputFunction",
     "ToolsToFinalOutputResult",
     "Runner",
+    "run_demo_loop",
     "Model",
     "ModelProvider",
     "ModelTracing",
diff --git a/src/agents/repl.py b/src/agents/repl.py
new file mode 100644
index 000000000..9a4f30759
--- /dev/null
+++ b/src/agents/repl.py
@@ -0,0 +1,65 @@
+from __future__ import annotations
+
+from typing import Any
+
+from openai.types.responses.response_text_delta_event import ResponseTextDeltaEvent
+
+from .agent import Agent
+from .items import ItemHelpers, TResponseInputItem
+from .result import RunResultBase
+from .run import Runner
+from .stream_events import AgentUpdatedStreamEvent, RawResponsesStreamEvent, RunItemStreamEvent
+
+
+async def run_demo_loop(agent: Agent[Any], *, stream: bool = True) -> None:
+    """Run a simple REPL loop with the given agent.
+
+    This utility allows quick manual testing and debugging of an agent from the
+    command line. Conversation state is preserved across turns. Enter ``exit``
+    or ``quit`` to stop the loop.
+
+    Args:
+        agent: The starting agent to run.
+        stream: Whether to stream the agent output.
+ """ + + current_agent = agent + input_items: list[TResponseInputItem] = [] + while True: + try: + user_input = input(" > ") + except (EOFError, KeyboardInterrupt): + print() + break + if user_input.strip().lower() in {"exit", "quit"}: + break + if not user_input: + continue + + input_items.append({"role": "user", "content": user_input}) + + result: RunResultBase + if stream: + result = Runner.run_streamed(current_agent, input=input_items) + async for event in result.stream_events(): + if isinstance(event, RawResponsesStreamEvent): + if isinstance(event.data, ResponseTextDeltaEvent): + print(event.data.delta, end="", flush=True) + elif isinstance(event, RunItemStreamEvent): + if event.item.type == "tool_call_item": + print("\n[tool called]", flush=True) + elif event.item.type == "tool_call_output_item": + print(f"\n[tool output: {event.item.output}]", flush=True) + elif event.item.type == "message_output_item": + message = ItemHelpers.text_message_output(event.item) + print(message, end="", flush=True) + elif isinstance(event, AgentUpdatedStreamEvent): + print(f"\n[Agent updated: {event.new_agent.name}]", flush=True) + print() + else: + result = await Runner.run(current_agent, input_items) + if result.final_output is not None: + print(result.final_output) + + current_agent = result.last_agent + input_items = result.to_input_list() diff --git a/tests/test_repl.py b/tests/test_repl.py new file mode 100644 index 000000000..7ba2011be --- /dev/null +++ b/tests/test_repl.py @@ -0,0 +1,28 @@ +import pytest + +from agents import Agent, run_demo_loop + +from .fake_model import FakeModel +from .test_responses import get_text_input_item, get_text_message + + +@pytest.mark.asyncio +async def test_run_demo_loop_conversation(monkeypatch, capsys): + model = FakeModel() + model.add_multiple_turn_outputs([[get_text_message("hello")], [get_text_message("good")]]) + + agent = Agent(name="test", model=model) + + inputs = iter(["Hi", "How are you?", "quit"]) + monkeypatch.setattr("builtins.input", lambda _=" > ": next(inputs)) + + await run_demo_loop(agent, stream=False) + + output = capsys.readouterr().out + assert "hello" in output + assert "good" in output + assert model.last_turn_args["input"] == [ + get_text_input_item("Hi"), + get_text_message("hello").model_dump(exclude_unset=True), + get_text_input_item("How are you?"), + ]