Skip to content

Commit 2c3337c

Browse files
authored
Add Ollama provider with response schema support & create LLM provider directory (#3306)
* Add response_schema support to ollama.py
* Create separate LLM provider directory, add response_schema to Ollama provider
* Update timesketch.conf
1 parent cd6532e commit 2c3337c

13 files changed

+133
-97
lines changed

data/timesketch.conf

+4-4
Original file line numberDiff line numberDiff line change
@@ -379,16 +379,16 @@ LLM_PROVIDER_CONFIGS = {
379379
'project_id': '',
380380
},
381381
},
382-
'llm_summarization': {
382+
'llm_summarize': {
383383
'aistudio': {
384384
'model': 'gemini-2.0-flash-exp',
385385
'project_id': '',
386386
},
387387
},
388388
'default': {
389-
'aistudio': {
390-
'api_key': '',
391-
'model': 'gemini-2.0-flash-exp',
389+
'ollama': {
390+
'server_url': 'http://ollama:11434',
391+
'model': 'gemma:7b',
392392
},
393393
}
394394
}

timesketch/api/v1/resources/llm_summarize.py

+4-3
Original file line numberDiff line numberDiff line change
@@ -28,7 +28,8 @@
2828
from flask_restful import Resource
2929

3030
from timesketch.api.v1 import resources, export
31-
from timesketch.lib import definitions, llms, utils
31+
from timesketch.lib import definitions, utils
32+
from timesketch.lib.llms.providers import manager as provider_manager
3233
from timesketch.lib.definitions import METRICS_NAMESPACE
3334
from timesketch.models.sketch import Sketch
3435

@@ -304,8 +305,8 @@ def _get_content(
304305
configured LLM provider
305306
"""
306307
try:
307-
feature_name = "llm_summarization"
308-
llm = llms.manager.LLMManager.create_provider(feature_name=feature_name)
308+
feature_name = "llm_summarize"
309+
llm = provider_manager.LLMManager.create_provider(feature_name=feature_name)
309310
except Exception as e: # pylint: disable=broad-except
310311
logger.error("Error LLM Provider: %s", e)
311312
abort(

timesketch/api/v1/resources/nl2q.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -26,7 +26,7 @@
2626
import pandas as pd
2727

2828
from timesketch.api.v1 import utils
29-
from timesketch.lib.llms import manager
29+
from timesketch.lib.llms.providers import manager
3030
from timesketch.lib.definitions import HTTP_STATUS_CODE_BAD_REQUEST
3131
from timesketch.lib.definitions import HTTP_STATUS_CODE_INTERNAL_SERVER_ERROR
3232
from timesketch.lib.definitions import HTTP_STATUS_CODE_NOT_FOUND

timesketch/api/v1/resources_test.py

+3-3
Original file line numberDiff line numberDiff line change
@@ -1198,7 +1198,7 @@ class TestNl2qResource(BaseTest):
11981198

11991199
resource_url = "/api/v1/sketches/1/nl2q/"
12001200

1201-
@mock.patch("timesketch.lib.llms.manager.LLMManager.create_provider")
1201+
@mock.patch("timesketch.lib.llms.providers.manager.LLMManager.create_provider")
12021202
@mock.patch("timesketch.api.v1.utils.run_aggregator")
12031203
@mock.patch("timesketch.api.v1.resources.OpenSearchDataStore", MockDataStore)
12041204
def test_nl2q_prompt(self, mock_aggregator, mock_create_provider):
@@ -1380,7 +1380,7 @@ def test_nl2q_no_permission(self):
13801380
)
13811381
self.assertEqual(response.status_code, HTTP_STATUS_CODE_FORBIDDEN)
13821382

1383-
@mock.patch("timesketch.lib.llms.manager.LLMManager.create_provider")
1383+
@mock.patch("timesketch.lib.llms.providers.manager.LLMManager.create_provider")
13841384
@mock.patch("timesketch.api.v1.utils.run_aggregator")
13851385
@mock.patch("timesketch.api.v1.resources.OpenSearchDataStore", MockDataStore)
13861386
def test_nl2q_llm_error(self, mock_aggregator, mock_create_provider):
@@ -1584,7 +1584,7 @@ def test_llm_summarize_no_events(self):
15841584
)
15851585

15861586
@mock.patch("timesketch.api.v1.resources.OpenSearchDataStore", MockDataStore)
1587-
@mock.patch("timesketch.lib.llms.manager.LLMManager.create_provider")
1587+
@mock.patch("timesketch.lib.llms.providers.manager.LLMManager.create_provider")
15881588
def test_llm_summarize_with_events(self, mock_create_provider):
15891589
"""Test LLM summarizer with events returned and mock LLM."""
15901590
self.login()

timesketch/lib/llms/__init__.py

+2-6
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
# Copyright 2024 Google Inc. All rights reserved.
1+
# Copyright 2025 Google Inc. All rights reserved.
22
#
33
# Licensed under the Apache License, Version 2.0 (the "License");
44
# you may not use this file except in compliance with the License.
@@ -11,8 +11,4 @@
1111
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
1212
# See the License for the specific language governing permissions and
1313
# limitations under the License.
14-
"""LLM module for Timesketch."""
15-
16-
from timesketch.lib.llms import ollama
17-
from timesketch.lib.llms import vertexai
18-
from timesketch.lib.llms import aistudio
14+
"""LLM libraries for Timesketch."""

timesketch/lib/llms/ollama.py

-72
This file was deleted.
+18
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,18 @@
1+
# Copyright 2024 Google Inc. All rights reserved.
2+
#
3+
# Licensed under the Apache License, Version 2.0 (the "License");
4+
# you may not use this file except in compliance with the License.
5+
# You may obtain a copy of the License at
6+
#
7+
# http://www.apache.org/licenses/LICENSE-2.0
8+
#
9+
# Unless required by applicable law or agreed to in writing, software
10+
# distributed under the License is distributed on an "AS IS" BASIS,
11+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12+
# See the License for the specific language governing permissions and
13+
# limitations under the License.
14+
"""LLM providers for Timesketch."""
15+
16+
from timesketch.lib.llms.providers import ollama
17+
from timesketch.lib.llms.providers import vertexai
18+
from timesketch.lib.llms.providers import aistudio

timesketch/lib/llms/aistudio.py → timesketch/lib/llms/providers/aistudio.py

+3-3
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
# Copyright 2024 Google Inc. All rights reserved.
1+
# Copyright 2025 Google Inc. All rights reserved.
22
#
33
# Licensed under the Apache License, Version 2.0 (the "License");
44
# you may not use this file except in compliance with the License.
@@ -15,8 +15,8 @@
1515

1616
import json
1717
from typing import Optional
18-
from timesketch.lib.llms import interface
19-
from timesketch.lib.llms import manager
18+
from timesketch.lib.llms.providers import interface
19+
from timesketch.lib.llms.providers import manager
2020

2121

2222
# Check if the required dependencies are installed.
File renamed without changes.

timesketch/lib/llms/manager.py → timesketch/lib/llms/providers/manager.py

+1-2
Original file line numberDiff line numberDiff line change
@@ -14,7 +14,7 @@
1414
"""This file contains a class for managing Large Language Model (LLM) providers."""
1515

1616
from flask import current_app
17-
from timesketch.lib.llms.interface import LLMProvider
17+
from timesketch.lib.llms.providers.interface import LLMProvider
1818

1919

2020
class LLMManager:
@@ -80,7 +80,6 @@ def create_provider(cls, feature_name: str = None, **kwargs) -> LLMProvider:
8080
raise ValueError(
8181
"Configuration for the feature must specify exactly one provider."
8282
)
83-
8483
provider_name = next(iter(config_mapping))
8584
provider_config = config_mapping[provider_name]
8685

timesketch/lib/llms/manager_test.py → timesketch/lib/llms/providers/manager_test.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -14,7 +14,7 @@
1414
"""Tests for LLM provider manager."""
1515

1616
from timesketch.lib.testlib import BaseTest
17-
from timesketch.lib.llms import manager
17+
from timesketch.lib.llms.providers import manager
1818

1919

2020
class MockAistudioProvider:
+94
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,94 @@
1+
# Copyright 2025 Google Inc. All rights reserved.
2+
#
3+
# Licensed under the Apache License, Version 2.0 (the "License");
4+
# you may not use this file except in compliance with the License.
5+
# You may obtain a copy of the License at
6+
#
7+
# http://www.apache.org/licenses/LICENSE-2.0
8+
#
9+
# Unless required by applicable law or agreed to in writing, software
10+
# distributed under the License is distributed on an "AS IS" BASIS,
11+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12+
# See the License for the specific language governing permissions and
13+
# limitations under the License.
14+
"""A LLM provider for the Ollama server."""
15+
from typing import Optional
16+
import json
17+
import requests
18+
19+
from timesketch.lib.llms.providers import interface
20+
from timesketch.lib.llms.providers import manager
21+
22+
23+
class Ollama(interface.LLMProvider):
24+
"""A LLM provider for the Ollama server."""
25+
26+
NAME = "ollama"
27+
28+
def _post(self, request_body: str) -> requests.Response:
29+
"""
30+
Make a POST request to the Ollama server.
31+
32+
Args:
33+
request_body: The body of the request in JSON format.
34+
35+
Returns:
36+
The response from the server as a requests.Response object.
37+
"""
38+
api_resource = "/api/chat"
39+
url = self.config.get("server_url") + api_resource
40+
return requests.post(
41+
url, data=request_body, headers={"Content-Type": "application/json"}
42+
)
43+
44+
def generate(self, prompt: str, response_schema: Optional[dict] = None) -> str:
45+
"""
46+
Generate text using the Ollama server, optionally with a JSON schema.
47+
48+
Args:
49+
prompt: The prompt to use for the generation.
50+
response_schema: An optional JSON schema to define the expected
51+
response format.
52+
53+
Returns:
54+
The generated text as a string (or parsed data if
55+
response_schema is provided).
56+
57+
Raises:
58+
ValueError: If the request fails or JSON parsing fails.
59+
"""
60+
request_body = {
61+
"messages": [{"role": "user", "content": prompt}],
62+
"model": self.config.get("model"),
63+
"stream": self.config.get("stream"),
64+
"options": {
65+
"temperature": self.config.get("temperature"),
66+
"num_predict": self.config.get("max_output_tokens"),
67+
"top_p": self.config.get("top_p"),
68+
"top_k": self.config.get("top_k"),
69+
},
70+
}
71+
72+
if response_schema:
73+
request_body["format"] = response_schema
74+
75+
response = self._post(json.dumps(request_body))
76+
77+
if response.status_code != 200:
78+
raise ValueError(f"Error generating text: {response.text}")
79+
80+
response_data = response.json()
81+
text_response = response_data.get("message", {}).get("content", "").strip()
82+
83+
if response_schema:
84+
try:
85+
return json.loads(text_response)
86+
except json.JSONDecodeError as error:
87+
raise ValueError(
88+
f"Error JSON parsing text: {text_response}: {error}"
89+
) from error
90+
91+
return text_response
92+
93+
94+
manager.LLMManager.register_provider(Ollama)

timesketch/lib/llms/vertexai.py → timesketch/lib/llms/providers/vertexai.py

+2-2
Original file line numberDiff line numberDiff line change
@@ -16,8 +16,8 @@
1616
import json
1717
from typing import Optional
1818

19-
from timesketch.lib.llms import interface
20-
from timesketch.lib.llms import manager
19+
from timesketch.lib.llms.providers import interface
20+
from timesketch.lib.llms.providers import manager
2121

2222
# Check if the required dependencies are installed.
2323
has_required_deps = True

0 commit comments

Comments
 (0)