|
| 1 | +import random |
| 2 | +from concurrent.futures import Future |
| 3 | +from typing import Any, Generator, List, Tuple |
| 4 | + |
| 5 | +import pytest |
| 6 | + |
| 7 | +from vcap.batch_executor import BatchExecutor, _Request |
| 8 | + |
| 9 | + |
@pytest.fixture()
def batch_executor():
    """Yield a BatchExecutor with a placeholder batch function.

    To use this fixture, replace batch_executor.batch_fn with your own
    batch function before submitting any work.
    """

    def batch_fn(inputs):
        # Fix: ``raise NotImplemented`` raises a TypeError, because
        # NotImplemented is a constant (for rich comparisons), not an
        # exception. NotImplementedError is the correct placeholder.
        raise NotImplementedError

    batch_executor = BatchExecutor(batch_fn=batch_fn)
    yield batch_executor
    # Tear down worker threads even if the test body failed
    batch_executor.close()
| 21 | + |
| 22 | + |
def batch_fn_base(inputs: List[int], raises: bool) \
        -> Generator[Any, None, None]:
    """Lazily transform each input, optionally failing mid-batch.

    This function is to be used as a base for other test cases for batch_fn
    variants.

    :param inputs: A list of inputs
    :param raises: If True, raises an error on the 5th input. If False,
        no exception will be raised.
    """
    for value in inputs:
        # A deliberate mid-batch failure, used to exercise how
        # BatchExecutor propagates exceptions raised by a batch_fn
        if raises and value == 5:
            raise RuntimeError("Oh no, a batch_fn error has occurred!")
        yield value * 100
| 40 | + |
| 41 | + |
def batch_fn_returns_generator(inputs: List[int]) \
        -> Generator[Any, None, None]:
    """Return results lazily, wrapped in a generator expression."""
    return (result for result in batch_fn_base(inputs, raises=False))
| 45 | + |
| 46 | + |
def batch_fn_returns_generator_raises(inputs: List[int]) \
        -> Generator[Any, None, None]:
    """Like batch_fn_returns_generator, but fails partway through."""
    return (result for result in batch_fn_base(inputs, raises=True))
| 50 | + |
| 51 | + |
def batch_fn_returns_list(inputs: List[int]) -> List[Any]:
    """Process every input eagerly and return the results as one list."""
    results = list(batch_fn_base(inputs, raises=False))
    return results
| 55 | + |
| 56 | + |
def batch_fn_returns_list_raises(inputs: List[int]) -> List[Any]:
    """Eager variant that raises before any results can be returned."""
    results = list(batch_fn_base(inputs, raises=True))
    return results
| 59 | + |
| 60 | + |
@pytest.mark.parametrize(
    argnames=["batch_fn", "expect_partial_results"],
    argvalues=[
        (batch_fn_returns_generator_raises, True),
        (batch_fn_returns_list_raises, False)
    ]
)
def test_exceptions_during_batch_fn(
        batch_executor, batch_fn, expect_partial_results):
    """Test that BatchExecutor catches exceptions that occur in the batch_fn
    and propagates them through the requests Future objects.

    If an exception occurs after processing some of the batch, the expectation
    is that the unprocessed inputs of the batch will get an exception
    set (expect_partial_results=True). If the exception happens before
    receiving any results, all future objects should have exceptions set.
    """
    batch_executor.batch_fn = batch_fn

    # Ten requests; the batch_fn blows up when it reaches input 5
    request_batch = [
        _Request(future=Future(), input_data=input_data)
        for input_data in range(10)
    ]
    batch_executor._on_requests_ready(request_batch)

    for position, request in enumerate(request_batch):
        # Generator-style batch_fns yield results 0-4 before failing;
        # list-style ones fail before producing anything at all.
        got_result = expect_partial_results and position < 5
        if got_result:
            result = request.future.result(timeout=5)
            assert result == request.input_data * 100, \
                "The result for this future doesn't match the input that " \
                "was supposed to have been routed to it!"
        else:
            with pytest.raises(RuntimeError):
                request.future.result(timeout=5)
| 95 | + |
| 96 | + |
@pytest.mark.parametrize(
    argnames=["batch_fn"],
    argvalues=[
        (batch_fn_returns_generator,),
        (batch_fn_returns_list,)
    ]
)
def test_relevant_input_outputs_match(batch_executor, batch_fn):
    """Test the output for any given input is routed to the correct
    Future object. """
    batch_executor.batch_fn = batch_fn

    # Shuffle the inputs deterministically so failures are reproducible
    request_inputs = list(range(10000))
    random.seed("vcap? More like vgood")
    random.shuffle(request_inputs)

    # Submit everything, remembering which future belongs to which input
    inputs_and_futures: List[Tuple[int, Future]] = [
        (input_data, batch_executor.submit(input_data))
        for input_data in request_inputs
    ]

    # Every future must resolve to the transform of its own input
    for input_data, future in inputs_and_futures:
        result = future.result(timeout=5)
        assert result == input_data * 100, \
            "The result for this future doesn't match the input that " \
            "was supposed to have been routed to it!"

    assert batch_executor.total_imgs_in_pipeline == 0
0 commit comments