Skip to content

Commit f34fd09

Browse files
committed
add .pre-commit-config.yaml, run lint
1 parent dbc99d2 commit f34fd09

File tree

5 files changed: +40 additions, -19 deletions

5 files changed: +40 additions, -19 deletions

.pre-commit-config.yaml

Lines changed: 20 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,20 @@
1+
repos:
2+
- repo: https://github.com/psf/black
3+
rev: "24.8.0"
4+
hooks:
5+
- id: black
6+
7+
- repo: https://github.com/astral-sh/ruff-pre-commit
8+
rev: v0.6.2
9+
hooks:
10+
- id: ruff
11+
name: "ruff for tiledbsoma_ml"
12+
args: ["--config=pyproject.toml"]
13+
14+
- repo: https://github.com/pre-commit/mirrors-mypy
15+
rev: v1.11.1
16+
hooks:
17+
- id: mypy
18+
pass_filenames: false
19+
args: ["--config-file=pyproject.toml", "src"]
20+
additional_dependencies: ["attrs", "numpy"]

notebooks/tutorial_lightning.ipynb

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -31,12 +31,13 @@
3131
"outputs": [],
3232
"source": [
3333
"import pytorch_lightning as pl\n",
34-
"import tiledbsoma_ml as soma_ml\n",
3534
"import torch\n",
3635
"from sklearn.preprocessing import LabelEncoder\n",
3736
"\n",
3837
"import tiledbsoma as soma\n",
3938
"\n",
39+
"import tiledbsoma_ml as soma_ml\n",
40+
"\n",
4041
"CZI_Census_Homo_Sapiens_URL = \"s3://cellxgene-census-public-us-west-2/cell-census/2024-07-01/soma/census_data/homo_sapiens/\"\n",
4142
"\n",
4243
"experiment = soma.open(\n",

notebooks/tutorial_pytorch.ipynb

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -42,11 +42,12 @@
4242
"metadata": {},
4343
"outputs": [],
4444
"source": [
45-
"import tiledbsoma_ml as soma_ml\n",
4645
"from sklearn.preprocessing import LabelEncoder\n",
4746
"\n",
4847
"import tiledbsoma as soma\n",
4948
"\n",
49+
"import tiledbsoma_ml as soma_ml\n",
50+
"\n",
5051
"CZI_Census_Homo_Sapiens_URL = \"s3://cellxgene-census-public-us-west-2/cell-census/2024-07-01/soma/census_data/homo_sapiens/\"\n",
5152
"\n",
5253
"experiment = soma.open(\n",

pyproject.toml

Lines changed: 3 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -16,7 +16,7 @@ dependencies = [
1616
"pyarrow",
1717
"scipy"
1818
]
19-
requires-python = ">= 3.8"
19+
requires-python = ">= 3.9"
2020
description = "Machine learning tools for use with tiledbsoma"
2121
readme = "README.md"
2222
authors = [
@@ -39,7 +39,6 @@ classifiers = [
3939
"Operating System :: MacOS :: MacOS X",
4040
"Operating System :: Microsoft :: Windows",
4141
"Programming Language :: Python",
42-
"Programming Language :: Python :: 3.8",
4342
"Programming Language :: Python :: 3.9",
4443
"Programming Language :: Python :: 3.10",
4544
"Programming Language :: Python :: 3.11",
@@ -65,15 +64,15 @@ show_error_codes = true
6564
ignore_missing_imports = true
6665
warn_unreachable = true
6766
strict = true
68-
python_version = 3.8
67+
python_version = 3.9
6968
plugins = "numpy.typing.mypy_plugin"
7069

7170
[tool.ruff]
7271
lint.select = ["E", "F", "B", "I"]
7372
lint.ignore = ["E501"] # line too long
7473
lint.extend-select = ["I001"] # unsorted-imports
7574
fix = true
76-
target-version = "py38"
75+
target-version = "py39"
7776
line-length = 120
7877

7978

tests/test_pytorch.py

Lines changed: 13 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -26,13 +26,14 @@
2626
# This supports the pytest `ml` mark, which can be used to disable all PyTorch-dependent
2727
# tests.
2828
try:
29+
from torch.utils.data._utils.worker import WorkerInfo
30+
2931
from tiledbsoma_ml.pytorch import (
3032
ExperimentAxisQueryIterable,
3133
ExperimentAxisQueryIterableDataset,
3234
ExperimentAxisQueryIterDataPipe,
3335
experiment_dataloader,
3436
)
35-
from torch.utils.data._utils.worker import WorkerInfo
3637
except ImportError:
3738
# this should only occur when not running `ml`-marked tests
3839
pass
@@ -538,11 +539,11 @@ def test_distributed__returns_data_partition_for_rank(
538539
"""Tests pytorch._partition_obs_joinids() behavior in a simulated PyTorch distributed processing mode,
539540
using mocks to avoid having to do real PyTorch distributed setup."""
540541

541-
with patch("torch.distributed.is_initialized") as mock_dist_is_initialized, patch(
542-
"torch.distributed.get_rank"
543-
) as mock_dist_get_rank, patch(
544-
"torch.distributed.get_world_size"
545-
) as mock_dist_get_world_size:
542+
with (
543+
patch("torch.distributed.is_initialized") as mock_dist_is_initialized,
544+
patch("torch.distributed.get_rank") as mock_dist_get_rank,
545+
patch("torch.distributed.get_world_size") as mock_dist_get_world_size,
546+
):
546547
mock_dist_is_initialized.return_value = True
547548
mock_dist_get_rank.return_value = rank
548549
mock_dist_get_world_size.return_value = world_size
@@ -593,13 +594,12 @@ def test_distributed_and_multiprocessing__returns_data_partition_for_rank(
593594
DataLoader multiprocessing mode, using mocks to avoid having to do distributed pytorch
594595
setup or real DataLoader multiprocessing."""
595596

596-
with patch("torch.utils.data.get_worker_info") as mock_get_worker_info, patch(
597-
"torch.distributed.is_initialized"
598-
) as mock_dist_is_initialized, patch(
599-
"torch.distributed.get_rank"
600-
) as mock_dist_get_rank, patch(
601-
"torch.distributed.get_world_size"
602-
) as mock_dist_get_world_size:
597+
with (
598+
patch("torch.utils.data.get_worker_info") as mock_get_worker_info,
599+
patch("torch.distributed.is_initialized") as mock_dist_is_initialized,
600+
patch("torch.distributed.get_rank") as mock_dist_get_rank,
601+
patch("torch.distributed.get_world_size") as mock_dist_get_world_size,
602+
):
603603
mock_get_worker_info.return_value = WorkerInfo(
604604
id=worker_id, num_workers=num_workers, seed=1234
605605
)

0 commit comments

Comments (0)