Skip to content

Commit 47ded80

Browse files
authored
ollama[patch]: fix generation info (#30863)
#30778 (not released) broke all invocation modes of ChatOllama (intent was to remove `"message"` from `generation_info`, but we turned `generation_info` into `stream_resp["message"]`), resulting in validation errors.
1 parent cf2697e commit 47ded80

File tree

1 file changed: +12 additions, −10 deletions

libs/partners/ollama/langchain_ollama/chat_models.py

Lines changed: 12 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -719,6 +719,11 @@ def _iterate_over_stream(
719719
is_thinking = False
720720
for stream_resp in self._create_chat_stream(messages, stop, **kwargs):
721721
if not isinstance(stream_resp, str):
722+
if stream_resp.get("done") is True:
723+
generation_info = dict(stream_resp)
724+
_ = generation_info.pop("message", None)
725+
else:
726+
generation_info = None
722727
chunk = ChatGenerationChunk(
723728
message=AIMessageChunk(
724729
content=(
@@ -732,11 +737,7 @@ def _iterate_over_stream(
732737
),
733738
tool_calls=_get_tool_calls_from_response(stream_resp),
734739
),
735-
generation_info=(
736-
dict(stream_resp).pop("message", None)
737-
if stream_resp.get("done") is True
738-
else None
739-
),
740+
generation_info=generation_info,
740741
)
741742
if chunk.generation_info and (
742743
model := chunk.generation_info.get("model")
@@ -773,6 +774,11 @@ async def _aiterate_over_stream(
773774
is_thinking = False
774775
async for stream_resp in self._acreate_chat_stream(messages, stop, **kwargs):
775776
if not isinstance(stream_resp, str):
777+
if stream_resp.get("done") is True:
778+
generation_info = dict(stream_resp)
779+
_ = generation_info.pop("message", None)
780+
else:
781+
generation_info = None
776782
chunk = ChatGenerationChunk(
777783
message=AIMessageChunk(
778784
content=(
@@ -786,11 +792,7 @@ async def _aiterate_over_stream(
786792
),
787793
tool_calls=_get_tool_calls_from_response(stream_resp),
788794
),
789-
generation_info=(
790-
dict(stream_resp).pop("message", None)
791-
if stream_resp.get("done") is True
792-
else None
793-
),
795+
generation_info=generation_info,
794796
)
795797
if chunk.generation_info and (
796798
model := chunk.generation_info.get("model")

0 commit comments

Comments (0)