Commit 3ace697

core: Improve mypy config
1 parent 5686fed commit 3ace697

68 files changed (+294 −321 lines)
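
Most of the diff below swaps blanket `# type: ignore` comments for mypy's specific error codes, drops ignores that a stricter config would flag as unused, and removes `cast(...)` calls that type narrowing already makes redundant. A minimal sketch of the ignore-narrowing pattern (`somepackage` is illustrative, not from this commit):

    # Before: a bare ignore silences every mypy error on this line.
    import somepackage  # type: ignore

    # After: scoping the ignore to one error code keeps mypy checking the
    # line for anything else, and warn_unused_ignores can flag the comment
    # once the underlying error goes away.
    import somepackage  # type: ignore[import-untyped]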

docs/docs/integrations/chat/litellm.ipynb (+1 −1)

@@ -121,7 +121,7 @@
 },
 "outputs": [],
 "source": [
-"from langchain_litellm.chat_models import ChatLiteLLM\n",
+"from langchain_litellm import ChatLiteLLM\n",
 "\n",
 "llm = ChatLiteLLM(model=\"gpt-3.5-turbo\")"
 ]

docs/docs/integrations/providers/litellm.mdx (+1 −1)

@@ -10,7 +10,7 @@ pip install langchain-litellm
 
 ## Chat Models
 ```python
-from langchain_litellm.chat_models import ChatLiteLLM
+from langchain_litellm import ChatLiteLLM
 ```
 See more detail in the guide [here](/docs/integrations/chat/litellm).

libs/core/langchain_core/_api/beta_decorator.py (+2 −4)

@@ -225,10 +225,8 @@ def finalize(wrapper: Callable[..., Any], new_doc: str) -> T:
         new_doc = f".. beta::\n {details}\n\n{old_doc}\n"
 
         if inspect.iscoroutinefunction(obj):
-            finalized = finalize(awarning_emitting_wrapper, new_doc)
-        else:
-            finalized = finalize(warning_emitting_wrapper, new_doc)
-        return cast("T", finalized)
+            return finalize(awarning_emitting_wrapper, new_doc)
+        return finalize(warning_emitting_wrapper, new_doc)
 
     return beta
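
Returning from each branch directly lets mypy check both `finalize(...)` calls against the declared return type `T` on their own, so the shared `finalized` variable and the `cast("T", finalized)` that reconciled it are no longer needed.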

libs/core/langchain_core/_api/deprecation.py (+6 −5)

@@ -152,7 +152,10 @@ def deprecate(
         _package: str = package,
     ) -> T:
         """Implementation of the decorator returned by `deprecated`."""
-        from langchain_core.utils.pydantic import FieldInfoV1, FieldInfoV2
+        from langchain_core.utils.pydantic import (  # type: ignore[attr-defined]
+            FieldInfoV1,
+            FieldInfoV2,
+        )
 
         def emit_warning() -> None:
             """Emit the warning."""
@@ -395,10 +398,8 @@ def finalize(wrapper: Callable[..., Any], new_doc: str) -> T:
         """
 
         if inspect.iscoroutinefunction(obj):
-            finalized = finalize(awarning_emitting_wrapper, new_doc)
-        else:
-            finalized = finalize(warning_emitting_wrapper, new_doc)
-        return cast("T", finalized)
+            return finalize(awarning_emitting_wrapper, new_doc)
+        return finalize(warning_emitting_wrapper, new_doc)
 
     return deprecate
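
Two changes are folded together here: the parenthesized import lets a single `# type: ignore[attr-defined]` cover both `FieldInfoV1` and `FieldInfoV2` (a `type: ignore` on the opening line of a multi-line statement applies to the whole statement), and the same early-return refactor as in `beta_decorator.py` above drops the `cast`.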

libs/core/langchain_core/callbacks/manager.py (+1 −1)

@@ -2405,7 +2405,7 @@ def _configure(
                 run_tree.trace_id,
                 run_tree.dotted_order,
             )
-            handler.run_map[str(run_tree.id)] = cast("Run", run_tree)
+            handler.run_map[str(run_tree.id)] = run_tree
     for var, inheritable, handler_class, env_var in _configure_hooks:
         create_one = (
             env_var is not None

libs/core/langchain_core/document_loaders/base.py (+1 −1)

@@ -80,7 +80,7 @@ async def alazy_load(self) -> AsyncIterator[Document]:
         iterator = await run_in_executor(None, self.lazy_load)
         done = object()
         while True:
-            doc = await run_in_executor(None, next, iterator, done)  # type: ignore[call-arg, arg-type]
+            doc = await run_in_executor(None, next, iterator, done)
             if doc is done:
                 break
             yield doc  # type: ignore[misc]
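
The dropped ignore suggests `run_in_executor`'s annotations now accept `next` with a default argument. The surrounding sentinel idiom is worth noting: `next(iterator, done)` returns the sentinel instead of raising `StopIteration` across the executor boundary. A standalone synchronous sketch:

    # A unique object compared by identity can never collide with a
    # value the iterator actually yields.
    done = object()
    it = iter([1, 2, 3])
    while True:
        item = next(it, done)  # sentinel instead of StopIteration
        if item is done:
            break
        print(item)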

libs/core/langchain_core/embeddings/fake.py (+2 −2)

@@ -52,7 +52,7 @@ class FakeEmbeddings(Embeddings, BaseModel):
     """The size of the embedding vector."""
 
     def _get_embedding(self) -> list[float]:
-        import numpy as np  # type: ignore[import-not-found, import-untyped]
+        import numpy as np
 
         return list(np.random.default_rng().normal(size=self.size))
 
@@ -109,7 +109,7 @@ class DeterministicFakeEmbedding(Embeddings, BaseModel):
     """The size of the embedding vector."""
 
     def _get_embedding(self, seed: int) -> list[float]:
-        import numpy as np  # type: ignore[import-not-found, import-untyped]
+        import numpy as np
 
         # set the seed for the random generator
        rng = np.random.default_rng(seed)
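
The ignores on `import numpy as np` could go presumably because numpy, which ships inline type annotations, is now visible to mypy in this environment; that is an inference from the diff rather than something the commit states.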

libs/core/langchain_core/globals.py (+6 −6)

@@ -23,7 +23,7 @@ def set_verbose(value: bool) -> None:
         value: The new value for the `verbose` global setting.
     """
     try:
-        import langchain  # type: ignore[import]
+        import langchain  # type: ignore[import-not-found]
 
         # We're about to run some deprecated code, don't report warnings from it.
         # The user called the correct (non-deprecated) code path and shouldn't get
@@ -57,7 +57,7 @@ def get_verbose() -> bool:
         The value of the `verbose` global setting.
     """
     try:
-        import langchain  # type: ignore[import]
+        import langchain
 
         # We're about to run some deprecated code, don't report warnings from it.
         # The user called the correct (non-deprecated) code path and shouldn't get
@@ -96,7 +96,7 @@ def set_debug(value: bool) -> None:
         value: The new value for the `debug` global setting.
     """
     try:
-        import langchain  # type: ignore[import]
+        import langchain
 
         # We're about to run some deprecated code, don't report warnings from it.
         # The user called the correct (non-deprecated) code path and shouldn't get
@@ -128,7 +128,7 @@ def get_debug() -> bool:
         The value of the `debug` global setting.
     """
     try:
-        import langchain  # type: ignore[import]
+        import langchain
 
         # We're about to run some deprecated code, don't report warnings from it.
         # The user called the correct (non-deprecated) code path and shouldn't get
@@ -164,7 +164,7 @@ def set_llm_cache(value: Optional["BaseCache"]) -> None:
         value: The new LLM cache to use. If `None`, the LLM cache is disabled.
     """
     try:
-        import langchain  # type: ignore[import]
+        import langchain
 
         # We're about to run some deprecated code, don't report warnings from it.
         # The user called the correct (non-deprecated) code path and shouldn't get
@@ -198,7 +198,7 @@ def get_llm_cache() -> "BaseCache":
         The value of the `llm_cache` global setting.
     """
     try:
-        import langchain  # type: ignore[import]
+        import langchain
 
         # We're about to run some deprecated code, don't report warnings from it.
         # The user called the correct (non-deprecated) code path and shouldn't get
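
Note the asymmetry: only the first `import langchain` keeps an ignore, narrowed to `[import-not-found]`, while the later occurrences drop theirs entirely. The likely reading is that mypy reports the unresolved import once per file, leaving the remaining ignores unused, though the commit itself does not say so.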

libs/core/langchain_core/indexing/api.py (+1 −1)

@@ -394,7 +394,7 @@ def index(
         if cleanup == "scoped_full":
             scoped_full_cleanup_source_ids.add(source_id)
     # source ids cannot be None after for loop above.
-    source_ids = cast("Sequence[str]", source_ids)  # type: ignore[assignment]
+    source_ids = cast("Sequence[str]", source_ids)
 
     exists_batch = record_manager.exists([doc.uid for doc in hashed_docs])

libs/core/langchain_core/language_models/base.py (+1 −1)

@@ -61,7 +61,7 @@ def get_tokenizer() -> Any:
     every time it is called.
     """
     try:
-        from transformers import GPT2TokenizerFast  # type: ignore[import]
+        from transformers import GPT2TokenizerFast  # type: ignore[import-not-found]
     except ImportError as e:
         msg = (
             "Could not import transformers python package. "

libs/core/langchain_core/language_models/chat_models.py (+2 −2)

@@ -853,7 +853,7 @@ async def agenerate(
             run_manager.on_llm_end(
                 LLMResult(
                     generations=[res.generations],  # type: ignore[list-item, union-attr]
-                    llm_output=res.llm_output,  # type: ignore[list-item, union-attr]
+                    llm_output=res.llm_output,  # type: ignore[union-attr]
                 )
             )
             for run_manager, res in zip(run_managers, results)
@@ -1109,7 +1109,7 @@ async def _astream(
                 None,
                 next,
                 iterator,
-                done,  # type: ignore[call-arg, arg-type]
+                done,
             )
             if item is done:
                 break

libs/core/langchain_core/language_models/llms.py (+5 −5)

@@ -455,7 +455,7 @@ def batch(
                 inputs[i : i + max_concurrency]
                 for i in range(0, len(inputs), max_concurrency)
             ]
-            config = [{**c, "max_concurrency": None} for c in config]  # type: ignore[misc]
+            config = [{**c, "max_concurrency": None} for c in config]
             return [
                 output
                 for i, batch in enumerate(batches)
@@ -501,7 +501,7 @@ async def abatch(
                 inputs[i : i + max_concurrency]
                 for i in range(0, len(inputs), max_concurrency)
             ]
-            config = [{**c, "max_concurrency": None} for c in config]  # type: ignore[misc]
+            config = [{**c, "max_concurrency": None} for c in config]
             return [
                 output
                 for i, batch in enumerate(batches)
@@ -746,7 +746,7 @@ async def _astream(
                 None,
                 next,
                 iterator,
-                done,  # type: ignore[call-arg, arg-type]
+                done,
             )
             if item is done:
                 break
@@ -1231,7 +1231,7 @@ async def agenerate(
                 stop,
                 run_managers,  # type: ignore[arg-type]
                 new_arg_supported=bool(new_arg_supported),
-                **kwargs,  # type: ignore[arg-type]
+                **kwargs,
             )
         if len(missing_prompts) > 0:
             run_managers = await asyncio.gather(
@@ -1253,7 +1253,7 @@ async def agenerate(
                 stop,
                 run_managers,  # type: ignore[arg-type]
                 new_arg_supported=bool(new_arg_supported),
-                **kwargs,  # type: ignore[arg-type]
+                **kwargs,
             )
             llm_output = await aupdate_cache(
                 self.cache,

libs/core/langchain_core/messages/ai.py (+1 −1)

@@ -202,7 +202,7 @@ def _backwards_compat_tool_calls(cls, values: dict) -> Any:
             raw_tool_calls := values.get("additional_kwargs", {}).get("tool_calls")
         ):
             try:
-                if issubclass(cls, AIMessageChunk):  # type: ignore
+                if issubclass(cls, AIMessageChunk):
                     values["tool_call_chunks"] = default_tool_chunk_parser(
                         raw_tool_calls
                     )

libs/core/langchain_core/messages/base.py (+3 −3)

@@ -119,7 +119,7 @@ def __add__(self, other: Any) -> ChatPromptTemplate:
         """Concatenate this message with another message."""
         from langchain_core.prompts.chat import ChatPromptTemplate
 
-        prompt = ChatPromptTemplate(messages=[self])  # type: ignore[call-arg]
+        prompt = ChatPromptTemplate(messages=[self])
         return prompt + other
 
     def pretty_repr(self, html: bool = False) -> str:
@@ -162,7 +162,7 @@ def merge_content(
     if isinstance(merged, str):
         # If the next chunk is also a string, then merge them naively
         if isinstance(content, str):
-            merged = cast("str", merged) + content
+            merged += content
         # If the next chunk is a list, add the current to the start of the list
         else:
             merged = [merged] + content  # type: ignore
@@ -213,7 +213,7 @@ def __add__(self, other: Any) -> BaseMessageChunk:  # type: ignore
         # If both are (subclasses of) BaseMessageChunk,
         # concat into a single BaseMessageChunk
 
-        return self.__class__(  # type: ignore[call-arg]
+        return self.__class__(
             id=self.id,
             type=self.type,
             content=merge_content(self.content, other.content),
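
The dropped `cast("str", merged)` in `merge_content` was redundant because the enclosing `isinstance(merged, str)` check already narrows the type for mypy. A minimal sketch of the narrowing (names illustrative):

    def combine(merged: object, content: str) -> object:
        if isinstance(merged, str):
            # mypy narrows `merged` to str inside this branch, so the
            # concatenation type-checks without a cast.
            merged += content
        return merged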

libs/core/langchain_core/messages/utils.py (+2 −2)

@@ -237,7 +237,7 @@ def _create_message_from_message_type(
     if additional_kwargs:
         if response_metadata := additional_kwargs.pop("response_metadata", None):
             kwargs["response_metadata"] = response_metadata
-        kwargs["additional_kwargs"] = additional_kwargs  # type: ignore[assignment]
+        kwargs["additional_kwargs"] = additional_kwargs
         additional_kwargs.update(additional_kwargs.pop("additional_kwargs", {}))
     if id is not None:
         kwargs["id"] = id
@@ -891,7 +891,7 @@ def list_token_counter(messages: Sequence[BaseMessage]) -> int:
             return sum(token_counter(msg) for msg in messages)  # type: ignore[arg-type, misc]
 
     else:
-        list_token_counter = token_counter  # type: ignore[assignment]
+        list_token_counter = token_counter
 else:
     msg = (
         f"'token_counter' expected to be a model that implements "

libs/core/langchain_core/output_parsers/json.py (+1 −1)

@@ -6,7 +6,7 @@
 from json import JSONDecodeError
 from typing import Annotated, Any, Optional, TypeVar, Union
 
-import jsonpatch  # type: ignore[import]
+import jsonpatch  # type: ignore[import-untyped]
 import pydantic
 from pydantic import SkipValidation
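
The two new error codes are deliberately different: `jsonpatch` is installed but ships without type stubs, hence `[import-untyped]`, while optional dependencies such as `transformers` in `language_models/base.py` may be missing entirely, hence `[import-not-found]`.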

libs/core/langchain_core/output_parsers/openai_functions.py (+3 −3)

@@ -5,7 +5,7 @@
 from types import GenericAlias
 from typing import Any, Optional, Union
 
-import jsonpatch  # type: ignore[import]
+import jsonpatch  # type: ignore[import-untyped]
 from pydantic import BaseModel, model_validator
 
 from langchain_core.exceptions import OutputParserException
@@ -275,9 +275,9 @@ def parse_result(self, result: list[Generation], *, partial: bool = False) -> An
         else:
             pydantic_schema = self.pydantic_schema
         if hasattr(pydantic_schema, "model_validate_json"):
-            pydantic_args = pydantic_schema.model_validate_json(_args)  # type: ignore
+            pydantic_args = pydantic_schema.model_validate_json(_args)
         else:
-            pydantic_args = pydantic_schema.parse_raw(_args)  # type: ignore
+            pydantic_args = pydantic_schema.parse_raw(_args)
         return pydantic_args

libs/core/langchain_core/output_parsers/pydantic.py (+1 −1)

@@ -20,7 +20,7 @@
 class PydanticOutputParser(JsonOutputParser, Generic[TBaseModel]):
     """Parse an output using a pydantic model."""
 
-    pydantic_object: Annotated[type[TBaseModel], SkipValidation()]  # type: ignore
+    pydantic_object: Annotated[type[TBaseModel], SkipValidation()]
     """The pydantic model to parse."""
 
     def _parse_obj(self, obj: dict) -> TBaseModel:

libs/core/langchain_core/output_parsers/xml.py (+1 −1)

@@ -205,7 +205,7 @@ def parse(self, text: str) -> dict[str, Union[str, list[Any]]]:
         # likely if you're reading this you can move them to the top of the file
         if self.parser == "defusedxml":
             try:
-                from defusedxml import ElementTree  # type: ignore
+                from defusedxml import ElementTree
             except ImportError as e:
                 msg = (
                     "defusedxml is not installed. "

libs/core/langchain_core/outputs/llm_result.py (+1 −1)

@@ -50,7 +50,7 @@ class LLMResult(BaseModel):
     run: Optional[list[RunInfo]] = None
     """List of metadata info for model call for each input."""
 
-    type: Literal["LLMResult"] = "LLMResult"  # type: ignore[assignment]
+    type: Literal["LLMResult"] = "LLMResult"
     """Type is used exclusively for serialization purposes."""
 
     def flatten(self) -> list[LLMResult]:

libs/core/langchain_core/prompts/chat.py (+6 −6)

@@ -126,7 +126,7 @@ def __add__(self, other: Any) -> ChatPromptTemplate:
         Returns:
             Combined prompt template.
         """
-        prompt = ChatPromptTemplate(messages=[self])  # type: ignore[call-arg]
+        prompt = ChatPromptTemplate(messages=[self])
         return prompt + other
 
@@ -1024,23 +1024,23 @@ def __add__(self, other: Any) -> ChatPromptTemplate:
         if isinstance(other, ChatPromptTemplate):
             return ChatPromptTemplate(messages=self.messages + other.messages).partial(
                 **partials
-            )  # type: ignore[call-arg]
+            )
         if isinstance(
             other, (BaseMessagePromptTemplate, BaseMessage, BaseChatPromptTemplate)
         ):
             return ChatPromptTemplate(messages=self.messages + [other]).partial(
                 **partials
-            )  # type: ignore[call-arg]
+            )
         if isinstance(other, (list, tuple)):
             _other = ChatPromptTemplate.from_messages(other)
             return ChatPromptTemplate(messages=self.messages + _other.messages).partial(
                 **partials
-            )  # type: ignore[call-arg]
+            )
         if isinstance(other, str):
             prompt = HumanMessagePromptTemplate.from_template(other)
             return ChatPromptTemplate(messages=self.messages + [prompt]).partial(
                 **partials
-            )  # type: ignore[call-arg]
+            )
         msg = f"Unsupported operand type for +: {type(other)}"
         raise NotImplementedError(msg)
 
@@ -1129,7 +1129,7 @@ def from_role_strings(
         Returns:
             a chat prompt template.
         """
-        return cls(  # type: ignore[call-arg]
+        return cls(
             messages=[
                 ChatMessagePromptTemplate.from_template(template, role=role)
                 for role, template in string_messages

libs/core/langchain_core/prompts/image.py (+1 −1)

@@ -116,7 +116,7 @@ def format(
         output: ImageURL = {"url": url}
         if detail:
             # Don't check literal values here: let the API check them
-            output["detail"] = detail  # type: ignore[typeddict-item]
+            output["detail"] = detail
         return output
 
     async def aformat(self, **kwargs: Any) -> ImageURL:
