Commit f2f9978

Update mypy in CI (#19449)
1 parent 4bcc4f1 · commit f2f9978

33 files changed, +45 -44 lines

requirements/data/data.txt (+1 -1)

@@ -3,6 +3,6 @@

 lightning-utilities >=0.8.0, <0.10.0
 # to be able to include also PL 2.0 and preserve `>` needed for CI min version bypass
-torch >0.14.0, <2.2.0
+torch >0.14.0, <=2.2.0
 lightning-cloud
 filelock

requirements/data/examples.txt (+1 -1)

@@ -1,3 +1,3 @@
 Pillow >= 9.5.0
 # min version to match torch >= 2.0.1
-torchvision >=0.15.2, <0.17.0
+torchvision >=0.15.2, <0.18.0

requirements/fabric/base.txt (+1 -1)

@@ -2,7 +2,7 @@
 # in case you want to preserve/enforce restrictions on the latest compatible version, add "strict" as an in-line comment

 numpy >=1.17.2, <1.27.0
-torch >=1.13.0, <2.2.0
+torch >=1.13.0, <=2.2.0
 fsspec[http] >=2022.5.0, <2023.11.0
 packaging >=20.0, <=23.1
 typing-extensions >=4.4.0, <4.10.0

requirements/fabric/examples.txt (+1 -1)

@@ -1,6 +1,6 @@
 # NOTE: the upper bound for the package version is only set for CI stability, and it is dropped while installing this package
 # in case you want to preserve/enforce restrictions on the latest compatible version, add "strict" as an in-line comment

-torchvision >=0.14.0, <0.17.0
+torchvision >=0.14.0, <0.18.0
 torchmetrics >=0.10.0, <1.3.0
 lightning-utilities >=0.8.0, <0.10.0

requirements/pytorch/base.txt (+1 -1)

@@ -2,7 +2,7 @@
 # in case you want to preserve/enforce restrictions on the latest compatible version, add "strict" as an in-line comment

 numpy >=1.17.2, <1.27.0
-torch >=1.13.0, <2.2.0
+torch >=1.13.0, <=2.2.0
 tqdm >=4.57.0, <4.67.0
 PyYAML >=5.4, <6.1.0
 fsspec[http] >=2022.5.0, <2023.11.0

requirements/pytorch/examples.txt (+1 -1)

@@ -2,7 +2,7 @@
 # in case you want to preserve/enforce restrictions on the latest compatible version, add "strict" as an in-line comment

 requests <2.32.0
-torchvision >=0.14.0, <0.17.0
+torchvision >=0.14.0, <0.18.0
 gym[classic_control] >=0.17.0, <0.27.0
 ipython[all] <8.15.0
 torchmetrics >=0.10.0, <1.3.0

requirements/typing.txt (+2 -2)

@@ -1,5 +1,5 @@
-mypy==1.5.1
-torch==2.1.0
+mypy==1.8.0
+torch==2.2.0

 types-Markdown
 types-PyYAML
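
The bumped pins above are what the CI typing job installs. As a rough local reproduction against the same versions, mypy can also be driven from Python; a minimal sketch, assuming `requirements/typing.txt` has been installed and that `src/lightning` is the tree being checked:

# Minimal sketch: invoke mypy programmatically against the source tree.
# Assumes `pip install -r requirements/typing.txt` (mypy==1.8.0, torch==2.2.0)
# and that `src/lightning` is the directory the CI job type-checks.
from mypy import api

stdout, stderr, exit_code = api.run(["src/lightning"])
print(stdout, end="")
if exit_code != 0:
    raise SystemExit(exit_code)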

src/lightning/app/cli/lightning_cli.py (+1 -1)

@@ -363,7 +363,7 @@ def init_pl_app(source: Union[Tuple[str], Tuple[str, str]], name: str, overwrite
         source_dir = str(Path(script_path).resolve().parent)
     elif len(source) == 2:
         # enable type checking once https://github.com/python/mypy/issues/1178 is available
-        source_dir, script_path = source  # type: ignore
+        source_dir, script_path = source
     else:
         click.echo(
             f"Incorrect number of arguments. You passed ({', '.join(source)}) but only either one argument"

src/lightning/data/streaming/compression.py (+4 -3)

@@ -11,7 +11,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-from abc import ABC, abstractclassmethod, abstractmethod
+from abc import ABC, abstractmethod
 from typing import Dict, TypeVar

 from lightning_utilities.core.imports import RequirementCache, requires
@@ -35,7 +35,8 @@ def compress(self, data: bytes) -> bytes:
     def decompress(self, data: bytes) -> bytes:
         pass

-    @abstractclassmethod
+    @classmethod
+    @abstractmethod
     def register(cls, compressors: Dict[str, "Compressor"]) -> None:
         pass

@@ -60,7 +61,7 @@ def decompress(self, data: bytes) -> bytes:
         return zstd.decompress(data)

     @classmethod
-    def register(cls, compressors: Dict[str, "Compressor"]) -> None:  # type: ignore
+    def register(cls, compressors: Dict[str, "Compressor"]) -> None:
         if not _ZSTD_AVAILABLE:
             return
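
`abc.abstractclassmethod` has been deprecated since Python 3.3; the replacement above stacks `@classmethod` on top of `@abstractmethod` (classmethod outermost), which is the documented substitute. A minimal sketch of the pattern (`Compressor` mirrors the class in the diff; `NoopCompressor` is a hypothetical subclass for illustration only):

# Minimal sketch: @classmethod over @abstractmethod replaces the deprecated
# abc.abstractclassmethod. NoopCompressor is hypothetical.
from abc import ABC, abstractmethod
from typing import Dict


class Compressor(ABC):
    @classmethod  # classmethod must be the outermost decorator
    @abstractmethod
    def register(cls, compressors: Dict[str, "Compressor"]) -> None: ...


class NoopCompressor(Compressor):
    @classmethod
    def register(cls, compressors: Dict[str, "Compressor"]) -> None:
        compressors["noop"] = cls()  # register() is the only abstract method here
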
src/lightning/fabric/loggers/csv_logs.py (+1 -1)

@@ -135,7 +135,7 @@ def experiment(self) -> "_ExperimentWriter":

     @override
     @rank_zero_only
-    def log_hyperparams(self, params: Union[Dict[str, Any], Namespace]) -> None:  # type: ignore[override]
+    def log_hyperparams(self, params: Union[Dict[str, Any], Namespace]) -> None:
         raise NotImplementedError("The `CSVLogger` does not yet support logging hyperparameters.")

     @override

src/lightning/fabric/loggers/tensorboard.py (+1 -1)

@@ -220,7 +220,7 @@ def log_metrics(self, metrics: Mapping[str, float], step: Optional[int] = None)

     @override
     @rank_zero_only
-    def log_hyperparams(  # type: ignore[override]
+    def log_hyperparams(
         self, params: Union[Dict[str, Any], Namespace], metrics: Optional[Dict[str, Any]] = None
     ) -> None:
         """Record hyperparameters. TensorBoard logs with and without saved hyperparameters are incompatible, the

src/lightning/fabric/plugins/precision/amp.py (+1 -1)

@@ -90,7 +90,7 @@ def optimizer_step(
         if isinstance(optimizer, LBFGS):
             raise TypeError("AMP and the LBFGS optimizer are not compatible.")
         # note: the scaler will skip the `optimizer.step` if nonfinite gradients are found
-        step_output = self.scaler.step(optimizer, **kwargs)
+        step_output = self.scaler.step(optimizer, **kwargs)  # type: ignore[arg-type]
         self.scaler.update()
         return step_output

src/lightning/fabric/plugins/precision/bitsandbytes.py (+2 -2)

@@ -344,9 +344,9 @@ def quantize(
     def to_empty(self, *, device: _DEVICE, recurse: bool = True) -> Self:
         if self.weight.dtype == torch.uint8:  # was quantized
             # cannot init the quantized params directly
-            weight = torch.empty(self.weight.quant_state[1], device=device, dtype=torch.half)  # type: ignore[arg-type]
+            weight = torch.empty(self.weight.quant_state[1], device=device, dtype=torch.half)
         else:
-            weight = torch.empty_like(self.weight.data, device=device)  # type: ignore[arg-type]
+            weight = torch.empty_like(self.weight.data, device=device)
         device = torch.device(device)
         if device.type == "cuda":  # re-quantize
             self.quantize_(weight, device)

src/lightning/fabric/plugins/precision/fsdp.py (+2 -2)

@@ -11,7 +11,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from typing import TYPE_CHECKING, Any, ContextManager, Dict, Literal, Optional, cast
+from typing import TYPE_CHECKING, Any, ContextManager, Dict, Literal, Optional

 import torch
 from lightning_utilities import apply_to_collection
@@ -128,7 +128,7 @@ def convert_output(self, data: Any) -> Any:
     @override
     def backward(self, tensor: Tensor, model: Optional[Module], *args: Any, **kwargs: Any) -> None:
         if self.scaler is not None:
-            tensor = cast(Tensor, self.scaler.scale(tensor))
+            tensor = self.scaler.scale(tensor)
         super().backward(tensor, model, *args, **kwargs)

     @override

src/lightning/fabric/strategies/fsdp.py (+1 -1)

@@ -225,7 +225,7 @@ def mixed_precision_config(self) -> Optional["MixedPrecision"]:
             return plugin.mixed_precision_config
         return None

-    @property  # type: ignore[override]
+    @property
     @override
     def precision(self) -> FSDPPrecision:
         plugin = self._precision

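The same `@property` change (the ignore coming off the decorator while `@override` stays underneath) repeats across the strategy files below; with mypy 1.8 a property override that narrows its return type apparently no longer needs the suppression. A minimal sketch of the shape, with hypothetical classes rather than the actual Lightning strategies:

# Minimal sketch: a subclass property narrowing the return type of a base
# property. The Precision/Strategy classes here are hypothetical stand-ins.
from typing_extensions import override


class Precision: ...


class DerivedPrecision(Precision): ...


class BaseStrategy:
    def __init__(self) -> None:
        self._precision: Precision = DerivedPrecision()

    @property
    def precision(self) -> Precision:
        return self._precision


class DerivedStrategy(BaseStrategy):
    @property
    @override
    def precision(self) -> DerivedPrecision:  # covariant (narrower) return type
        plugin = self._precision
        assert isinstance(plugin, DerivedPrecision)
        return plugin
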
src/lightning/fabric/strategies/single_xla.py (+2 -2)

@@ -50,7 +50,7 @@ def __init__(
             precision=precision,
         )

-    @property  # type: ignore[override]
+    @property
     @override
     def checkpoint_io(self) -> XLACheckpointIO:
         plugin = self._checkpoint_io
@@ -66,7 +66,7 @@ def checkpoint_io(self, io: Optional[XLACheckpointIO]) -> None:
             raise TypeError(f"The XLA strategy can only work with the `XLACheckpointIO` plugin, found {io}")
         self._checkpoint_io = io

-    @property  # type: ignore[override]
+    @property
     @override
     def precision(self) -> XLAPrecision:
         plugin = self._precision

src/lightning/fabric/strategies/xla.py (+2 -2)

@@ -72,7 +72,7 @@ def root_device(self) -> torch.device:
     def num_processes(self) -> int:
         return len(self.parallel_devices) if self.parallel_devices is not None else 0

-    @property  # type: ignore[override]
+    @property
     @override
     def checkpoint_io(self) -> XLACheckpointIO:
         plugin = self._checkpoint_io
@@ -88,7 +88,7 @@ def checkpoint_io(self, io: Optional[XLACheckpointIO]) -> None:
             raise TypeError(f"The XLA strategy can only work with the `XLACheckpointIO` plugin, found {io}")
         self._checkpoint_io = io

-    @property  # type: ignore[override]
+    @property
     @override
     def precision(self) -> XLAPrecision:
         plugin = self._precision

src/lightning/fabric/strategies/xla_fsdp.py (+2 -2)

@@ -124,7 +124,7 @@ def root_device(self) -> torch.device:
     def num_processes(self) -> int:
         return len(self.parallel_devices) if self.parallel_devices is not None else 0

-    @property  # type: ignore[override]
+    @property
     @override
     def checkpoint_io(self) -> XLACheckpointIO:
         plugin = self._checkpoint_io
@@ -140,7 +140,7 @@ def checkpoint_io(self, io: Optional[XLACheckpointIO]) -> None:
             raise TypeError(f"The XLA strategy can only work with the `XLACheckpointIO` plugin, found {io}")
         self._checkpoint_io = io

-    @property  # type: ignore[override]
+    @property
     @override
     def precision(self) -> XLAPrecision:
         plugin = self._precision

src/lightning/fabric/utilities/apply_func.py (+1 -1)

@@ -27,7 +27,7 @@


 def _from_numpy(value: np.ndarray, device: _DEVICE) -> Tensor:
-    return torch.from_numpy(value).to(device)  # type: ignore[arg-type]
+    return torch.from_numpy(value).to(device)


 CONVERSION_DTYPES: List[Tuple[Any, Callable[[Any, Any], Tensor]]] = [

src/lightning/fabric/utilities/init.py (+1 -1)

@@ -60,7 +60,7 @@ def _materialize(module: torch.nn.Module, device: _DEVICE) -> None:
     """Materialize a module."""
     if not _TORCH_GREATER_EQUAL_2_1:
         raise RuntimeError("recurse=False requires torch 2.1")
-    module.to_empty(device=device, recurse=False)  # type: ignore[arg-type]
+    module.to_empty(device=device, recurse=False)
     if not hasattr(module, "reset_parameters"):
         raise TypeError(
             f"Materialization requires that the `{type(module).__name__}.reset_parameters` method is implemented."

src/lightning/pytorch/cli.py (+1 -1)

@@ -360,7 +360,7 @@ def __init__(
         self.trainer_class = trainer_class
         self.trainer_defaults = trainer_defaults or {}
         self.seed_everything_default = seed_everything_default
-        self.parser_kwargs = parser_kwargs or {}  # type: ignore[var-annotated]  # github.com/python/mypy/issues/6463
+        self.parser_kwargs = parser_kwargs or {}
         self.auto_configure_optimizers = auto_configure_optimizers

         self.model_class = model_class

src/lightning/pytorch/core/module.py (+1 -1)

@@ -320,7 +320,7 @@ def loggers(self) -> Union[List[Logger], List[FabricLogger]]:
             return self._fabric.loggers
         if self._trainer is not None:
             return self._trainer.loggers
-        return []  # type: ignore[return-value]
+        return []

     def _call_batch_hook(self, hook_name: str, *args: Any) -> Any:
         trainer = self._trainer

src/lightning/pytorch/loggers/comet.py (+1 -1)

@@ -305,7 +305,7 @@ def experiment(self) -> Union["Experiment", "ExistingExperiment", "OfflineExperi

     @override
     @rank_zero_only
-    def log_hyperparams(self, params: Union[Dict[str, Any], Namespace]) -> None:  # type: ignore[override]
+    def log_hyperparams(self, params: Union[Dict[str, Any], Namespace]) -> None:
         params = _convert_params(params)
         params = _flatten_dict(params)
         self.experiment.log_parameters(params)

src/lightning/pytorch/loggers/csv_logs.py (+1 -1)

@@ -145,7 +145,7 @@ def save_dir(self) -> str:

     @override
     @rank_zero_only
-    def log_hyperparams(self, params: Union[Dict[str, Any], Namespace]) -> None:  # type: ignore[override]
+    def log_hyperparams(self, params: Union[Dict[str, Any], Namespace]) -> None:
         params = _convert_params(params)
         self.experiment.log_hparams(params)

src/lightning/pytorch/loggers/mlflow.py (+1 -1)

@@ -220,7 +220,7 @@ def experiment_id(self) -> Optional[str]:

     @override
     @rank_zero_only
-    def log_hyperparams(self, params: Union[Dict[str, Any], Namespace]) -> None:  # type: ignore[override]
+    def log_hyperparams(self, params: Union[Dict[str, Any], Namespace]) -> None:
         params = _convert_params(params)
         params = _flatten_dict(params)

src/lightning/pytorch/loggers/neptune.py (+1 -1)

@@ -390,7 +390,7 @@ def run(self) -> "Run":

     @override
     @rank_zero_only
-    def log_hyperparams(self, params: Union[Dict[str, Any], Namespace]) -> None:  # type: ignore[override]
+    def log_hyperparams(self, params: Union[Dict[str, Any], Namespace]) -> None:
         r"""Log hyperparameters to the run.

         Hyperparameters will be logged under the "<prefix>/hyperparams" namespace.

src/lightning/pytorch/loggers/tensorboard.py (+1 -1)

@@ -154,7 +154,7 @@ def save_dir(self) -> str:

     @override
     @rank_zero_only
-    def log_hyperparams(  # type: ignore[override]
+    def log_hyperparams(
         self, params: Union[Dict[str, Any], Namespace], metrics: Optional[Dict[str, Any]] = None
     ) -> None:
         """Record hyperparameters. TensorBoard logs with and without saved hyperparameters are incompatible, the

src/lightning/pytorch/loggers/wandb.py (+1 -1)

@@ -415,7 +415,7 @@ def watch(

     @override
     @rank_zero_only
-    def log_hyperparams(self, params: Union[Dict[str, Any], Namespace]) -> None:  # type: ignore[override]
+    def log_hyperparams(self, params: Union[Dict[str, Any], Namespace]) -> None:
         params = _convert_params(params)
         params = _sanitize_callable_params(params)
         self.experiment.config.update(params, allow_val_change=True)

src/lightning/pytorch/plugins/precision/amp.py (+2 -2)

@@ -86,14 +86,14 @@ def optimizer_step(  # type: ignore[override]
         # Unscaling needs to be performed here in case we are going to apply gradient clipping.
         # Optimizers that perform unscaling in their `.step()` method are not supported (e.g., fused Adam).
         # Note: `unscale` happens after the closure is executed, but before the `on_before_optimizer_step` hook.
-        self.scaler.unscale_(optimizer)
+        self.scaler.unscale_(optimizer)  # type: ignore[arg-type]

         self._after_closure(model, optimizer)

         # in manual optimization, the closure does not return a value
         if not skip_unscaling:
             # note: the scaler will skip the `optimizer.step` if nonfinite gradients are found
-            step_output = self.scaler.step(optimizer, **kwargs)
+            step_output = self.scaler.step(optimizer, **kwargs)  # type: ignore[arg-type]
             self.scaler.update()
             return step_output
         return closure_result

src/lightning/pytorch/plugins/precision/fsdp.py (+1 -1)

@@ -137,7 +137,7 @@ def convert_output(self, data: Any) -> Any:
     @override
     def pre_backward(self, tensor: Tensor, module: "pl.LightningModule") -> Tensor:  # type: ignore[override]
         if self.scaler is not None:
-            tensor = self.scaler.scale(tensor)  # type: ignore[assignment]
+            tensor = self.scaler.scale(tensor)
         return super().pre_backward(tensor, module)

     @override

src/lightning/pytorch/strategies/fsdp.py (+1 -1)

@@ -213,7 +213,7 @@ def mixed_precision_config(self) -> Optional["MixedPrecision"]:
             return plugin.mixed_precision_config
         return None

-    @property  # type: ignore[override]
+    @property
     @override
     def precision_plugin(self) -> FSDPPrecision:
         plugin = self._precision_plugin

src/lightning/pytorch/strategies/single_xla.py (+2 -2)

@@ -56,7 +56,7 @@ def __init__(
         )
         self.debug = debug

-    @property  # type: ignore[override]
+    @property
     @override
     def checkpoint_io(self) -> Union[XLACheckpointIO, _WrappingCheckpointIO]:
         plugin = self._checkpoint_io
@@ -72,7 +72,7 @@ def checkpoint_io(self, io: Optional[Union[XLACheckpointIO, _WrappingCheckpointI
             raise TypeError(f"The XLA strategy can only work with the `XLACheckpointIO` plugin, found {io}")
         self._checkpoint_io = io

-    @property  # type: ignore[override]
+    @property
     @override
     def precision_plugin(self) -> XLAPrecision:
         plugin = self._precision_plugin

src/lightning/pytorch/strategies/xla.py (+2 -2)

@@ -70,7 +70,7 @@ def __init__(
         self._launched = False
         self._sync_module_states = sync_module_states

-    @property  # type: ignore[override]
+    @property
     @override
     def checkpoint_io(self) -> Union[XLACheckpointIO, _WrappingCheckpointIO]:
         plugin = self._checkpoint_io
@@ -86,7 +86,7 @@ def checkpoint_io(self, io: Optional[Union[XLACheckpointIO, _WrappingCheckpointI
             raise TypeError(f"The XLA strategy can only work with the `XLACheckpointIO` plugin, found {io}")
         self._checkpoint_io = io

-    @property  # type: ignore[override]
+    @property
     @override
     def precision_plugin(self) -> XLAPrecision:
         plugin = self._precision_plugin
