Skip to content

v0.5.1 add listener on 'page' in context, add screenshots, fix Laminar.flush #100

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 8 commits into from
Apr 4, 2025
Merged
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion README.md
Original file line number Diff line number Diff line change
@@ -264,6 +264,6 @@ async for chunk in client.agent.run(
):
if chunk.chunkType == 'step':
print(chunk.summary)
else if chunk.chunkType == 'finalOutput':
elif chunk.chunkType == 'finalOutput':
print(chunk.content.result.content)
```
2 changes: 1 addition & 1 deletion pyproject.toml
Original file line number Diff line number Diff line change
@@ -6,7 +6,7 @@

[project]
name = "lmnr"
version = "0.5.0"
version = "0.5.1"
description = "Python SDK for Laminar"
authors = [
{ name = "lmnr.ai", email = "founders@lmnr.ai" }
5 changes: 2 additions & 3 deletions src/lmnr/openllmetry_sdk/__init__.py
Original file line number Diff line number Diff line change
@@ -67,6 +67,5 @@ def init(
)

@staticmethod
def flush():
if getattr(TracerManager, "__tracer_wrapper", None):
TracerManager.__tracer_wrapper.flush()
def flush() -> bool:
return TracerManager.__tracer_wrapper.flush()
7 changes: 6 additions & 1 deletion src/lmnr/openllmetry_sdk/tracing/tracing.py
Original file line number Diff line number Diff line change
@@ -236,8 +236,13 @@ def clear(cls):
cls.__span_id_to_path = {}
cls.__span_id_lists = {}

def flush(self):
def shutdown(self):
self.__spans_processor.force_flush()
self.__spans_processor.shutdown()
self.__tracer_provider.shutdown()

def flush(self):
return self.__spans_processor.force_flush()

def get_tracer(self):
return self.__tracer_provider.get_tracer(TRACER_NAME)
94 changes: 90 additions & 4 deletions src/lmnr/sdk/browser/playwright_otel.py
Original file line number Diff line number Diff line change
@@ -22,8 +22,11 @@
from wrapt import wrap_function_wrapper

try:
from playwright.async_api import Browser
from playwright.sync_api import Browser as SyncBrowser
from playwright.async_api import Browser, BrowserContext
from playwright.sync_api import (
Browser as SyncBrowser,
BrowserContext as SyncBrowserContext,
)
except ImportError as e:
raise ImportError(
f"Attempted to import {__file__}, but it is designed "
@@ -84,8 +87,12 @@ def _wrap_new_browser_sync(
set_span_in_context(span, get_current())
_context_spans[id(context)] = span
span.set_attribute("lmnr.internal.has_browser_session", True)
trace_id = format(span.get_span_context().trace_id, "032x")
context.on(
"page",
lambda page: handle_navigation_sync(page, session_id, trace_id, client),
)
for page in context.pages:
trace_id = format(span.get_span_context().trace_id, "032x")
handle_navigation_sync(page, session_id, trace_id, client)
return browser

@@ -106,12 +113,67 @@ async def _wrap_new_browser_async(
set_span_in_context(span, get_current())
_context_spans[id(context)] = span
span.set_attribute("lmnr.internal.has_browser_session", True)
trace_id = format(span.get_span_context().trace_id, "032x")

async def handle_page_navigation(page):
return await handle_navigation_async(page, session_id, trace_id, client)

context.on("page", handle_page_navigation)
for page in context.pages:
trace_id = format(span.get_span_context().trace_id, "032x")
await handle_navigation_async(page, session_id, trace_id, client)
return browser


@with_tracer_and_client_wrapper
def _wrap_new_context_sync(
    tracer: Tracer, client: LaminarClient, to_wrap, wrapped, instance, args, kwargs
):
    """Wrap sync ``BrowserContext`` creation to record browser sessions.

    Creates (or reuses) a span carrying the browser-session marker, then
    attaches session recording both to pages opened later (via the context's
    ``"page"`` event) and to pages that already exist on the context.

    Returns the ``BrowserContext`` produced by the wrapped call, unchanged.
    """
    context: SyncBrowserContext = wrapped(*args, **kwargs)
    # One recording session id per browser context.
    # uuid4().hex is already a str, no str() wrapper needed.
    session_id = uuid.uuid4().hex
    span = get_current_span()
    if span == INVALID_SPAN:
        # No active span: start one so the session can be tied to a trace,
        # and remember it so it can be ended when the context closes.
        span = tracer.start_span(
            name=f"{to_wrap.get('object')}.{to_wrap.get('method')}"
        )
        set_span_in_context(span, get_current())
        _context_spans[id(context)] = span
    span.set_attribute("lmnr.internal.has_browser_session", True)
    trace_id = format(span.get_span_context().trace_id, "032x")

    # Instrument pages opened after this point ...
    context.on(
        "page",
        lambda page: handle_navigation_sync(page, session_id, trace_id, client),
    )
    # ... and pages that already exist on the context.
    for page in context.pages:
        handle_navigation_sync(page, session_id, trace_id, client)
    return context


@with_tracer_and_client_wrapper
async def _wrap_new_context_async(
    tracer: Tracer, client: AsyncLaminarClient, to_wrap, wrapped, instance, args, kwargs
):
    """Wrap async ``BrowserContext`` creation to record browser sessions.

    Async counterpart of ``_wrap_new_context_sync``: creates (or reuses) a
    span carrying the browser-session marker, then attaches session recording
    to both future pages (``"page"`` event) and already-open pages.

    Returns the ``BrowserContext`` produced by the wrapped call, unchanged.
    """
    # Fix: this is the async API, so the result is an async BrowserContext,
    # not playwright.sync_api's SyncBrowserContext.
    context: BrowserContext = await wrapped(*args, **kwargs)
    # One recording session id per browser context.
    # uuid4().hex is already a str, no str() wrapper needed.
    session_id = uuid.uuid4().hex
    span = get_current_span()
    if span == INVALID_SPAN:
        # No active span: start one so the session can be tied to a trace,
        # and remember it so it can be ended when the context closes.
        span = tracer.start_span(
            name=f"{to_wrap.get('object')}.{to_wrap.get('method')}"
        )
        set_span_in_context(span, get_current())
        _context_spans[id(context)] = span
    span.set_attribute("lmnr.internal.has_browser_session", True)
    trace_id = format(span.get_span_context().trace_id, "032x")

    async def handle_page_navigation(page):
        return await handle_navigation_async(page, session_id, trace_id, client)

    # Instrument pages opened after this point ...
    context.on("page", handle_page_navigation)
    # ... and pages that already exist on the context.
    for page in context.pages:
        await handle_navigation_async(page, session_id, trace_id, client)
    return context


@with_tracer_and_client_wrapper
def _wrap_close_browser_sync(
tracer: Tracer,
@@ -191,6 +253,18 @@ async def _wrap_close_browser_async(
"method": "close",
"wrapper": _wrap_close_browser_sync,
},
{
"package": "playwright.sync_api",
"object": "Browser",
"method": "new_context",
"wrapper": _wrap_new_context_sync,
},
{
"package": "playwright.sync_api",
"object": "BrowserType",
"method": "launch_persistent_context",
"wrapper": _wrap_new_context_sync,
},
]

WRAPPED_METHODS_ASYNC = [
@@ -230,6 +304,18 @@ async def _wrap_close_browser_async(
"method": "close",
"wrapper": _wrap_close_browser_async,
},
{
"package": "playwright.async_api",
"object": "Browser",
"method": "new_context",
"wrapper": _wrap_new_context_async,
},
{
"package": "playwright.async_api",
"object": "BrowserType",
"method": "launch_persistent_context",
"wrapper": _wrap_new_context_async,
},
]


14 changes: 7 additions & 7 deletions src/lmnr/sdk/browser/pw_utils.py
Original file line number Diff line number Diff line change
@@ -36,7 +36,7 @@
() => {
const BATCH_SIZE = 1000; // Maximum events to store in memory

window.lmnrRrwebEventsBatch = [];
window.lmnrRrwebEventsBatch = new Set();

// Utility function to compress individual event data
async function compressEventData(data) {
@@ -50,8 +50,8 @@

window.lmnrGetAndClearEvents = () => {
const events = window.lmnrRrwebEventsBatch;
window.lmnrRrwebEventsBatch = [];
return events;
window.lmnrRrwebEventsBatch = new Set();
return Array.from(events);
};

// Add heartbeat events
@@ -62,11 +62,11 @@
timestamp: Date.now()
};

window.lmnrRrwebEventsBatch.push(heartbeat);
window.lmnrRrwebEventsBatch.add(heartbeat);

// Prevent memory issues by limiting batch size
if (window.lmnrRrwebEventsBatch.length > BATCH_SIZE) {
window.lmnrRrwebEventsBatch = window.lmnrRrwebEventsBatch.slice(-BATCH_SIZE);
if (window.lmnrRrwebEventsBatch.size > BATCH_SIZE) {
window.lmnrRrwebEventsBatch = new Set(Array.from(window.lmnrRrwebEventsBatch).slice(-BATCH_SIZE));
}
}, 1000);

@@ -81,7 +81,7 @@
...event,
data: await compressEventData(event.data)
};
window.lmnrRrwebEventsBatch.push(compressedEvent);
window.lmnrRrwebEventsBatch.add(compressedEvent);
}
});
}
13 changes: 9 additions & 4 deletions src/lmnr/sdk/client/asynchronous/resources/agent.py
Original file line number Diff line number Diff line change
@@ -35,6 +35,7 @@ async def run(
model_provider: Optional[ModelProvider] = None,
model: Optional[str] = None,
enable_thinking: bool = True,
return_screenshots: bool = False,
) -> AsyncIterator[RunAgentResponseChunk]:
"""Run Laminar index agent in streaming mode.

@@ -45,7 +46,7 @@ async def run(
model_provider (Optional[ModelProvider], optional): LLM model provider
model (Optional[str], optional): LLM model name
enable_thinking (bool, optional): whether to enable thinking on the underlying LLM. Default to True.

return_screenshots (bool, optional): whether to return screenshots of the agent's states at every step. Default to False.
Returns:
AsyncIterator[RunAgentResponseChunk]: a generator of response chunks
"""
@@ -59,6 +60,7 @@ async def run(
model_provider: Optional[ModelProvider] = None,
model: Optional[str] = None,
enable_thinking: bool = True,
return_screenshots: bool = False,
) -> AgentOutput:
"""Run Laminar index agent.

@@ -68,7 +70,7 @@ async def run(
model_provider (Optional[ModelProvider], optional): LLM model provider
model (Optional[str], optional): LLM model name
enable_thinking (bool, optional): whether to enable thinking on the underlying LLM. Default to True.

return_screenshots (bool, optional): whether to return screenshots of the agent's states at every step. Default to False.
Returns:
AgentOutput: agent output
"""
@@ -83,6 +85,7 @@ async def run(
model: Optional[str] = None,
stream: Literal[False] = False,
enable_thinking: bool = True,
return_screenshots: bool = False,
) -> AgentOutput:
"""Run Laminar index agent.

@@ -93,7 +96,7 @@ async def run(
model (Optional[str], optional): LLM model name
stream (Literal[False], optional): whether to stream the agent's response
enable_thinking (bool, optional): whether to enable thinking on the underlying LLM. Default to True.

return_screenshots (bool, optional): whether to return screenshots of the agent's states at every step. Default to False.
Returns:
AgentOutput: agent output
"""
@@ -107,6 +110,7 @@ async def run(
model: Optional[str] = None,
stream: bool = False,
enable_thinking: bool = True,
return_screenshots: bool = False,
) -> Union[AgentOutput, Awaitable[AsyncIterator[RunAgentResponseChunk]]]:
"""Run Laminar index agent.

@@ -117,7 +121,7 @@ async def run(
model (Optional[str], optional): LLM model name
stream (bool, optional): whether to stream the agent's response
enable_thinking (bool, optional): whether to enable thinking on the underlying LLM. Default to True.

return_screenshots (bool, optional): whether to return screenshots of the agent's states at every step. Default to False.
Returns:
Union[AgentOutput, AsyncIterator[RunAgentResponseChunk]]: agent output or a generator of response chunks
"""
@@ -146,6 +150,7 @@ async def run(
# https://aws.amazon.com/blogs/networking-and-content-delivery/introducing-nlb-tcp-configurable-idle-timeout/
stream=True,
enable_thinking=enable_thinking,
return_screenshots=return_screenshots,
)

# For streaming case, use a generator function
12 changes: 9 additions & 3 deletions src/lmnr/sdk/client/synchronous/resources/agent.py
Original file line number Diff line number Diff line change
@@ -27,6 +27,7 @@ def run(
model_provider: Optional[ModelProvider] = None,
model: Optional[str] = None,
enable_thinking: bool = True,
return_screenshots: bool = False,
) -> Generator[RunAgentResponseChunk, None, None]:
"""Run Laminar index agent in streaming mode.

@@ -37,7 +38,7 @@ def run(
model_provider (Optional[ModelProvider], optional): LLM model provider
model (Optional[str], optional): LLM model name
enable_thinking (bool, optional): whether to enable thinking on the underlying LLM. Default to True.

return_screenshots (bool, optional): whether to return screenshots of the agent's states at every step. Default to False.
Returns:
Generator[RunAgentResponseChunk, None, None]: a generator of response chunks
"""
@@ -51,6 +52,7 @@ def run(
model_provider: Optional[ModelProvider] = None,
model: Optional[str] = None,
enable_thinking: bool = True,
return_screenshots: bool = False,
) -> AgentOutput:
"""Run Laminar index agent.

@@ -60,6 +62,7 @@ def run(
model_provider (Optional[ModelProvider], optional): LLM model provider
model (Optional[str], optional): LLM model name
enable_thinking (bool, optional): whether to enable thinking on the underlying LLM. Default to True.
return_screenshots (bool, optional): whether to return screenshots of the agent's states at every step. Default to False.

Returns:
AgentOutput: agent output
@@ -75,6 +78,7 @@ def run(
model: Optional[str] = None,
stream: Literal[False] = False,
enable_thinking: bool = True,
return_screenshots: bool = False,
) -> AgentOutput:
"""Run Laminar index agent.

@@ -85,7 +89,7 @@ def run(
model (Optional[str], optional): LLM model name
stream (Literal[False], optional): whether to stream the agent's response
enable_thinking (bool, optional): whether to enable thinking on the underlying LLM. Default to True.
cdp_url (Optional[str], optional): CDP URL to connect to an existing browser session.
return_screenshots (bool, optional): whether to return screenshots of the agent's states at every step. Default to False.

Returns:
AgentOutput: agent output
@@ -100,6 +104,7 @@ def run(
model: Optional[str] = None,
stream: bool = False,
enable_thinking: bool = True,
return_screenshots: bool = False,
) -> Union[AgentOutput, Generator[RunAgentResponseChunk, None, None]]:
"""Run Laminar index agent.

@@ -110,7 +115,7 @@ def run(
model (Optional[str], optional): LLM model name
stream (bool, optional): whether to stream the agent's response
enable_thinking (bool, optional): whether to enable thinking on the underlying LLM. Default to True.
cdp_url (Optional[str], optional): CDP URL to connect to an existing browser session.
return_screenshots (bool, optional): whether to return screenshots of the agent's states at every step. Default to False.

Returns:
Union[AgentOutput, Generator[RunAgentResponseChunk, None, None]]: agent output or a generator of response chunks
@@ -140,6 +145,7 @@ def run(
# https://aws.amazon.com/blogs/networking-and-content-delivery/introducing-nlb-tcp-configurable-idle-timeout/
stream=True,
enable_thinking=enable_thinking,
return_screenshots=return_screenshots,
)

# For streaming case, use a generator function
Loading
Loading