
Commit 69eee70

update tests for 2.2.1
1 parent: 623ec58

12 files changed: +2 additions, -74 deletions
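All twelve files make the same change: they drop an `xfail` marker that guarded against pytorch/pytorch#116056, a Windows + DDP regression in PyTorch 2.2.0, presumably because the upstream fix shipped in PyTorch 2.2.1. For reference, below is a minimal self-contained sketch of the removed pattern. The `_TORCH_GREATER_EQUAL_2_2` flag is rebuilt locally here as an assumption; the real suite imports an equivalent constant from Lightning's utility modules.

# Sketch of the conditional-xfail pattern removed throughout this commit.
# Assumption: _TORCH_GREATER_EQUAL_2_2 is reconstructed from torch.__version__;
# the test suite imports an equivalent flag rather than defining it inline.
import sys

import pytest
import torch
from packaging.version import Version

_TORCH_GREATER_EQUAL_2_2 = Version(torch.__version__.split("+")[0]) >= Version("2.2.0")


@pytest.mark.xfail(
    # https://github.com/pytorch/pytorch/issues/116056
    sys.platform == "win32" and _TORCH_GREATER_EQUAL_2_2,
    reason="Windows + DDP issue in PyTorch 2.2",
)
def test_example_ddp_behaviour():
    # Placeholder body; in the suite, any DDP-touching test carried this marker.
    assert True

With a conditional `xfail`, the test still runs everywhere; pytest only tolerates (and reports) a failure when the condition is true, which is why removing the marker is safe once the underlying bug is fixed.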

tests/tests_fabric/plugins/precision/test_amp_integration.py (-5)

@@ -41,11 +41,6 @@ def forward(self, x):
         return output
 
 
-@pytest.mark.xfail(
-    # https://github.com/pytorch/pytorch/issues/116056
-    sys.platform == "win32" and _TORCH_GREATER_EQUAL_2_2,
-    reason="Windows + DDP issue in PyTorch 2.2",
-)
 @pytest.mark.parametrize(
     ("accelerator", "precision", "expected_dtype"),
     [

tests/tests_fabric/strategies/launchers/test_multiprocessing_integration.py (-5)

@@ -31,11 +31,6 @@ def __init__(self):
         self.register_buffer("buffer", torch.ones(3))
 
 
-@pytest.mark.xfail(
-    # https://github.com/pytorch/pytorch/issues/116056
-    sys.platform == "win32" and _TORCH_GREATER_EQUAL_2_2,
-    reason="Windows + DDP issue in PyTorch 2.2",
-)
 @pytest.mark.parametrize("strategy", ["ddp_spawn", pytest.param("ddp_fork", marks=RunIf(skip_windows=True))])
 def test_memory_sharing_disabled(strategy):
     """Test that the multiprocessing launcher disables memory sharing on model parameters and buffers to avoid race

tests/tests_fabric/strategies/test_ddp_integration.py (-5)

@@ -28,11 +28,6 @@
 from tests_fabric.test_fabric import BoringModel
 
 
-@pytest.mark.xfail(
-    # https://github.com/pytorch/pytorch/issues/116056
-    sys.platform == "win32" and _TORCH_GREATER_EQUAL_2_2,
-    reason="Windows + DDP issue in PyTorch 2.2",
-)
 @pytest.mark.parametrize(
     "accelerator",
     [

tests/tests_fabric/utilities/test_distributed.py (-5)

@@ -121,11 +121,6 @@ def test_collective_operations(devices, process):
     spawn_launch(process, devices)
 
 
-@pytest.mark.xfail(
-    # https://github.com/pytorch/pytorch/issues/116056
-    sys.platform == "win32" and _TORCH_GREATER_EQUAL_2_2,
-    reason="Windows + DDP issue in PyTorch 2.2",
-)
 @pytest.mark.flaky(reruns=3)  # flaky with "process 0 terminated with signal SIGABRT" (GLOO)
 def test_is_shared_filesystem(tmp_path, monkeypatch):
     # In the non-distributed case, every location is interpreted as 'shared'

tests/tests_fabric/utilities/test_spike.py (-5)

@@ -29,11 +29,6 @@ def spike_detection_test(fabric, global_rank_spike, spike_value, should_raise):
     )
 
 
-@pytest.mark.xfail(
-    # https://github.com/pytorch/pytorch/issues/116056
-    sys.platform == "win32" and _TORCH_GREATER_EQUAL_2_2,
-    reason="Windows + DDP issue in PyTorch 2.2",
-)
 @pytest.mark.flaky(max_runs=3)
 @pytest.mark.parametrize(
     ("global_rank_spike", "num_devices", "spike_value", "finite_only"),

tests/tests_pytorch/callbacks/test_spike.py (-5)

@@ -47,11 +47,6 @@ def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx):
         super().on_train_batch_end(trainer, pl_module, outputs, batch, batch_idx)
 
 
-@pytest.mark.xfail(
-    # https://github.com/pytorch/pytorch/issues/116056
-    sys.platform == "win32" and _TORCH_GREATER_EQUAL_2_2,
-    reason="Windows + DDP issue in PyTorch 2.2",
-)
 @pytest.mark.flaky(max_runs=3)
 @pytest.mark.parametrize(
     ("global_rank_spike", "num_devices", "spike_value", "finite_only"),

tests/tests_pytorch/loops/test_prediction_loop.py (-5)

@@ -52,11 +52,6 @@ def predict_step(self, batch, batch_idx):
     assert trainer.predict_loop.predictions == []
 
 
-@pytest.mark.xfail(
-    # https://github.com/pytorch/pytorch/issues/116056
-    sys.platform == "win32" and _TORCH_GREATER_EQUAL_2_2,
-    reason="Windows + DDP issue in PyTorch 2.2",
-)
 @pytest.mark.parametrize("use_distributed_sampler", [False, True])
 def test_prediction_loop_batch_sampler_set_epoch_called(tmp_path, use_distributed_sampler):
     """Tests that set_epoch is called on the dataloader's batch sampler (if any) during prediction."""

tests/tests_pytorch/models/test_amp.py (+1, -10)

@@ -55,16 +55,7 @@ def _assert_autocast_enabled(self):
     [
         ("single_device", "16-mixed", 1),
         ("single_device", "bf16-mixed", 1),
-        pytest.param(
-            "ddp_spawn",
-            "16-mixed",
-            2,
-            marks=pytest.mark.xfail(
-                # https://github.com/pytorch/pytorch/issues/116056
-                sys.platform == "win32" and _TORCH_GREATER_EQUAL_2_2,
-                reason="Windows + DDP issue in PyTorch 2.2",
-            ),
-        ),
+        ("ddp_spawn", "16-mixed", 2),
         pytest.param("ddp_spawn", "bf16-mixed", 2, marks=RunIf(skip_windows=True)),
     ],
 )
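This hunk differs from the plain decorator removals: because the mark was attached to a single parametrize case, the old code needed `pytest.param`, and with the mark gone a bare tuple suffices. A sketch of the general rule (names and the skipif condition are illustrative, not from this diff):

# Sketch: pytest.param is only required when a single parametrize case needs
# marks (or a custom id); a bare tuple is otherwise equivalent.
import sys

import pytest


@pytest.mark.parametrize(
    ("strategy", "precision", "devices"),
    [
        ("single_device", "16-mixed", 1),  # plain tuple: no per-case marks
        ("ddp_spawn", "16-mixed", 2),      # xfail removed, so a tuple suffices
        pytest.param(
            "ddp_spawn",
            "bf16-mixed",
            2,
            # keep pytest.param while a case still carries a mark
            marks=pytest.mark.skipif(sys.platform == "win32", reason="illustrative"),
        ),
    ],
)
def test_strategy_precision(strategy, precision, devices):
    assert devices in (1, 2)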

tests/tests_pytorch/serve/test_servable_module_validator.py (-5)

@@ -38,11 +38,6 @@ def test_servable_module_validator():
     callback.on_train_start(Trainer(accelerator="cpu"), model)
 
 
-@pytest.mark.xfail(
-    # https://github.com/pytorch/pytorch/issues/116056
-    sys.platform == "win32" and _TORCH_GREATER_EQUAL_2_2,
-    reason="Windows + DDP issue in PyTorch 2.2",
-)
 @pytest.mark.flaky(reruns=3)
 def test_servable_module_validator_with_trainer(tmpdir):
     callback = ServableModuleValidator()

tests/tests_pytorch/strategies/launchers/test_multiprocessing.py (-10)

@@ -196,11 +196,6 @@ def on_fit_start(self) -> None:
         assert torch.equal(self.layer.weight.data, self.tied_layer.weight.data)
 
 
-@pytest.mark.xfail(
-    # https://github.com/pytorch/pytorch/issues/116056
-    sys.platform == "win32" and _TORCH_GREATER_EQUAL_2_2,
-    reason="Windows + DDP issue in PyTorch 2.2",
-)
 def test_memory_sharing_disabled():
     """Test that the multiprocessing launcher disables memory sharing on model parameters and buffers to avoid race
     conditions on model updates."""
@@ -221,11 +216,6 @@ def test_check_for_missing_main_guard():
     launcher.launch(function=Mock())
 
 
-@pytest.mark.xfail(
-    # https://github.com/pytorch/pytorch/issues/116056
-    sys.platform == "win32" and _TORCH_GREATER_EQUAL_2_2,
-    reason="Windows + DDP issue in PyTorch 2.2",
-)
 def test_fit_twice_raises():
     model = BoringModel()
     trainer = Trainer(

tests/tests_pytorch/trainer/connectors/test_data_connector.py (-5)

@@ -125,11 +125,6 @@ def on_train_end(self):
         self.ctx.__exit__(None, None, None)
 
 
-@pytest.mark.xfail(
-    # https://github.com/pytorch/pytorch/issues/116056
-    sys.platform == "win32" and _TORCH_GREATER_EQUAL_2_2,
-    reason="Windows + DDP issue in PyTorch 2.2",
-)
 @pytest.mark.parametrize("num_workers", [0, 1, 2])
 def test_dataloader_persistent_workers_performance_warning(num_workers, tmp_path):
     """Test that when the multiprocessing start-method is 'spawn', we recommend setting `persistent_workers=True`."""

tests/tests_pytorch/trainer/logging_/test_train_loop_logging.py (+1, -9)

@@ -348,15 +348,7 @@ def validation_step(self, batch, batch_idx):
     ("devices", "accelerator"),
     [
         (1, "cpu"),
-        pytest.param(
-            2,
-            "cpu",
-            marks=pytest.mark.xfail(
-                # https://github.com/pytorch/pytorch/issues/116056
-                sys.platform == "win32" and _TORCH_GREATER_EQUAL_2_2,
-                reason="Windows + DDP issue in PyTorch 2.2",
-            ),
-        ),
+        (2, "cpu"),
         pytest.param(2, "gpu", marks=RunIf(min_cuda_gpus=2)),
     ],
 )
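One way to sanity-check a change like this on a Windows machine with PyTorch 2.2.1 installed is to re-run a couple of the touched files and confirm the previously-xfailed tests now pass outright. A sketch using pytest's programmatic entry point (the file paths are from this commit; the invocation itself is standard pytest, not part of the change):

# Sketch: run two of the files touched by this commit; on Windows with
# PyTorch 2.2.1, the tests that used to carry the xfail should simply pass.
import pytest

if __name__ == "__main__":
    raise SystemExit(
        pytest.main(
            [
                "tests/tests_fabric/strategies/test_ddp_integration.py",
                "tests/tests_pytorch/models/test_amp.py",
                "-v",
            ]
        )
    )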
