Script can be run via pytest options #1

Merged · 8 commits · Feb 17, 2024
29 changes: 29 additions & 0 deletions .github/workflows/pytest.yaml
@@ -0,0 +1,29 @@
on:
push:
branches: [ main ]
pull_request:
branches: [ main ]

jobs:
build:
runs-on: ubuntu-latest

steps:
- uses: actions/checkout@v3
- name: Set up Python
uses: actions/setup-python@v4
with:
python-version: '3.10'

- name: Install dependencies
run: pip install -r requirements.txt && pip install -e .

- name: Run pytest
run: pytest --junitxml=report.xml

- name: Publish Test Report
uses: actions/upload-artifact@v3
if: always() # Ensures that the report is published even if pytest fails
with:
name: pytest-reports
path: report.xml
19 changes: 19 additions & 0 deletions README.md
@@ -2,6 +2,8 @@

Sometimes one test can affect the execution of another test. This simple script tries to find the offender.

This script can be called as a standalone script or via a pytest option.

Example:

```python
@@ -24,6 +26,14 @@ Running 2 tests.
Faulty test: faulty_test.py::test_faulty
```
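
For illustration, here is a minimal sketch of the kind of inter-test dependency this tool hunts for. The file and test names mirror the integration fixtures referenced elsewhere in this PR, but the test bodies are hypothetical, not the repository's actual fixtures:

```python
# faulty_test.py (illustrative): test_faulty leaks module-level state
# that test_failing implicitly depends on, so test_failing only breaks
# when test_faulty has already run earlier in the same session.
CACHE = {}


def test_faulty():
    CACHE["user"] = "alice"  # pollutes shared module state
    assert CACHE["user"] == "alice"


def test_failing():
    # Passes in isolation (CACHE starts empty), fails after test_faulty ran.
    assert CACHE == {}
```

Against such a suite, `pytest-bisect-tests --failing-test "faulty_test.py::test_failing"` should bisect down to `test_faulty` as the offender, as in the output above.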

## Comparison of UX

Standalone script:
[![asciicast](https://asciinema.org/a/ONa6xL49QAvpPT4XPMMFDe367.svg)](https://asciinema.org/a/ONa6xL49QAvpPT4XPMMFDe367)

pytest option:
[![asciicast](https://asciinema.org/a/FLwgOSMhQyT30pkn2iOxh3qjN.svg)](https://asciinema.org/a/FLwgOSMhQyT30pkn2iOxh3qjN)

## Installation

```shell
@@ -32,6 +42,7 @@ $ pip install pytest-bisect-tests

## Usage

### Standalone script
```shell
$ pytest-bisect-tests --failing-test "<identifier of the test as pytest shows them with -v>"
```
@@ -54,6 +65,14 @@ options:
--stdout If passed, pytest output will be shown.
```
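
A fuller standalone invocation might combine these flags. The options below are the ones this CLI actually defines (`--failing-test`, `--collect-options`, `--run-options`, `--stdout`); the concrete values are placeholders in the spirit of the integration tests further down:

```shell
$ pytest-bisect-tests \
    --failing-test "tests/orders_test.py::test_total" \
    --collect-options "--test-group-count 2 --test-group 2" \
    --run-options "orders_test.py" \
    --stdout
```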

### As pytest option

This approach automatically discovers all tests in the suite as well as the failing test, so it requires minimal input options.

```shell
$ pytest --bisect-first-failure
```
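
Because the plugin re-invokes pytest with the original command line minus this flag (see the `sys.argv` handling in `pytest_plugin.py` below), any other options or paths you pass should be forwarded to the bisection runs as well; the test path here is a placeholder:

```shell
$ pytest tests/integration -q --bisect-first-failure
```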

## Alternatives
[detect-test-pollution](https://github.com/asottile/detect-test-pollution), an alternative package with similar functionality. It lacks support for passing pytest run and collect options or for showing the output from pytest. It also doesn't support pytest plugins that alter
4 changes: 3 additions & 1 deletion pytest_bisect_tests/cli.py
@@ -24,7 +24,9 @@ def main() -> None:
"This is useful when, for example, you have a test grouping plugin that affects the tests run."
),
)
parser.add_argument("--stdout", action="store_true", help="If passed, pytest output will be shown.")
parser.add_argument(
"--stdout", action="store_true", help="If passed, pytest output will be shown."
)
args = parser.parse_args()

pytest_runner = PytestRunner(
65 changes: 64 additions & 1 deletion pytest_bisect_tests/pytest_plugin.py
@@ -1,13 +1,19 @@
import os
from typing import Generator, List, cast
import subprocess
import sys
from typing import Generator, List, Optional, Union, cast
from pluggy import Result

import pytest

from pytest_bisect_tests.bisect import NoResultFound, run_bisect
from pytest_bisect_tests.pytest_runner import run_pytest_with_test_names


def pytest_addoption(parser: pytest.Parser) -> None:
parser.addoption("--bisect-tests-ids-from-fd", default=-1, type=int)
parser.addoption("--bisect-tests-ids-to-fd", default=-1, type=int)
parser.addoption("--bisect-first-failure", action="store_true")


@pytest.hookimpl(hookwrapper=True)
@@ -30,3 +36,60 @@ def pytest_collection_modifyitems(
if write_fd >= 0:
os.write(write_fd, "\n".join([item.nodeid for item in items]).encode())
os.close(write_fd)


def pytest_cmdline_main(
config: pytest.Config,
) -> Optional[Union[pytest.ExitCode, int]]:
if config.option.bisect_first_failure:
from _pytest.main import wrap_session

def doit(
config: pytest.Config, session: pytest.Session
) -> Union[pytest.ExitCode, int]:
pytest_args = sys.argv[:]
pytest_args.remove("--bisect-first-failure")

print("Running all tests until first failure...")
try:
subprocess.check_call(
[*pytest_args, "--cache-clear", "-x"],
stderr=subprocess.DEVNULL,
stdout=subprocess.DEVNULL,
)
except subprocess.CalledProcessError:
pass

failed_tests = list(config.cache.get("cache/lastfailed", default={}))
if not failed_tests:
print("Error: No test failed.")
return 1

if len(failed_tests) > 1:
print("Error: Multiple tests failed:")
for test in failed_tests:
print("-", test)
return 1

all_tests = list(config.cache.get("cache/nodeids", default=[]))
print("Collected", len(all_tests), "tests.")
print(f"Failing test: {failed_tests[0]!r}.")
try:
faulty_test = run_bisect(
test_names=all_tests,
failing_test=failed_tests[0],
test_runner=lambda names: run_pytest_with_test_names(
names,
args=pytest_args,
stdout=subprocess.DEVNULL,
),
)
print("Faulty test:", faulty_test)
return 0
except NoResultFound:
print("Error: No faulty test found.")
return 1

return wrap_session(config, doit)

return None
50 changes: 28 additions & 22 deletions pytest_bisect_tests/pytest_runner.py
@@ -2,7 +2,7 @@
import os
import shlex
import subprocess
from typing import List
from typing import List, Optional


class PytestRunner:
@@ -17,27 +17,9 @@ def __init__(
self.__stdout = None if stdout else subprocess.DEVNULL

def run(self, test_names: List[str]) -> bool:
print("Running", len(test_names), "tests.")
r, w = os.pipe()
os.set_inheritable(r, True)
try:
p = subprocess.Popen(
[
"pytest",
"--quiet",
"--bisect-tests-ids-from-fd",
str(r),
*self.__run_options,
],
close_fds=False,
stdout=self.__stdout,
stderr=subprocess.DEVNULL,
)
os.write(w, "\n".join(test_names).encode())
os.close(w)
return p.wait() == 0
finally:
os.closerange(r, w)
return run_pytest_with_test_names(
test_names=test_names, args=["pytest", "--quiet"], stdout=self.__stdout
)

def collect_tests(self) -> List[str]:
r, w = os.pipe()
@@ -61,3 +43,27 @@ def collect_tests(self) -> List[str]:
return [l.strip() for l in f.readlines()]
finally:
os.closerange(r, w)


def run_pytest_with_test_names(
test_names: List[str], args: List[str], stdout: Optional[int]
) -> bool:
print("Running", len(test_names), "tests.")
r, w = os.pipe()
os.set_inheritable(r, True)
try:
p = subprocess.Popen(
[
*args,
"--bisect-tests-ids-from-fd",
str(r),
],
close_fds=False,
stdout=stdout,
stderr=subprocess.DEVNULL,
)
os.write(w, "\n".join(test_names).encode())
os.close(w)
return p.wait() == 0
finally:
os.closerange(r, w)
89 changes: 62 additions & 27 deletions tests/integration_test.py
@@ -1,58 +1,93 @@
from pathlib import Path
import shlex
import subprocess
from typing import Tuple
import typing

import pytest
from _pytest.fixtures import SubRequest

HERE = Path(__file__).parent


def test_should_detect_faulty_test() -> None:
def _standalone_caller(
failing_test: str, collect_options: str, run_options: str
) -> Tuple[str, int]:
args = ["pytest-bisect-tests", "--failing-test", failing_test]
if collect_options:
args.extend(["--collect-options", collect_options])
if run_options:
args.extend(["--run-options", run_options])

p = subprocess.Popen(
[
"pytest-bisect-tests",
"--failing-test",
"integration_data/faulty_test.py::test_failing",
"--run-options",
"faulty_test.py",
],
args,
cwd=HERE.parent / "integration_data",
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
try:
p.wait(30)
out, err = p.communicate(timeout=30)
retcode = p.wait(30)
out, _ = p.communicate(timeout=30)
return out.decode(), retcode
except subprocess.TimeoutExpired:
p.kill()
pytest.fail("process timedout")

assert "Faulty test: integration_data/faulty_test.py::test_faulty" in out.decode()
assert p.wait() == 0

def _inpytest_caller(
failing_test: str, collect_options: str, run_options: str
) -> Tuple[str, int]:
args = ["pytest", "--bisect-first-failure"]
if collect_options:
args.extend(shlex.split(collect_options))
if run_options:
args.extend(shlex.split(run_options))

def test_should_work_with_items_modifying_plugins() -> None:
p = subprocess.Popen(
[
"pytest-bisect-tests",
"--failing-test",
"integration_data/test_groups_test.py::test_failing_group2",
"--collect-options",
"--test-group-count 2 --test-group 2",
"--run-options",
"test_groups_test.py",
],
args,
cwd=HERE.parent / "integration_data",
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
try:
p.wait(30)
out, err = p.communicate(timeout=30)
retcode = p.wait(30)
out, _ = p.communicate(timeout=30)
return out.decode(), retcode
except subprocess.TimeoutExpired:
p.kill()
pytest.fail("process timedout")

assert (
"Faulty test: integration_data/test_groups_test.py::test_faulty" in out.decode()

class PluginCaller(typing.Protocol):
def __call__(
self, failing_test: str, collect_options: str, run_options: str
) -> Tuple[str, int]: ...


@pytest.fixture(
params=[_standalone_caller, _inpytest_caller], ids=["standalone", "inpytest"]
)
def plugin_caller(request: SubRequest) -> PluginCaller:
return request.param


def test_should_detect_faulty_test(plugin_caller: PluginCaller) -> None:
out, code = plugin_caller(
failing_test="integration_data/faulty_test.py::test_failing",
collect_options="",
run_options="faulty_test.py",
)
assert p.wait() == 0

assert "Faulty test: integration_data/faulty_test.py::test_faulty" in out
assert code == 0


def test_should_work_with_items_modifying_plugins(plugin_caller: PluginCaller) -> None:
out, code = plugin_caller(
failing_test="integration_data/test_groups_test.py::test_failing_group2",
collect_options="--test-group-count 2 --test-group 2",
run_options="test_groups_test.py",
)

assert "Faulty test: integration_data/test_groups_test.py::test_faulty" in out
assert code == 0