fix: Make the TDS size-sub-failure test more deterministic #6588

Open · wants to merge 2 commits into main
42 changes: 32 additions & 10 deletions py/server/tests/test_table_data_service.py
@@ -53,6 +53,8 @@ def __init__(self, gen_pa_table: Generator[pa.Table, None, None], pt_schema: pa.
         self.partitions_size_subscriptions: Dict[TableLocationKey, bool] = {}
         self.existing_partitions_called: int = 0
         self.partition_size_called: int = 0
+        self.is_size_sub_failure_cb_called: bool = False
+        self.size_sub_failure_cb_called_cond: threading.Condition = threading.Condition()
 
     def table_schema(self, table_key: TableKeyImpl,
                      schema_cb: Callable[[pa.Schema, Optional[pa.Schema]], None],
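Note on the two new fields: together they form a hand-rolled one-shot event, a boolean flag guarded by a threading.Condition. A minimal standalone sketch of the pattern (the names done, signal_done, and wait_done are illustrative, not from this PR):

import threading

done = False
cond = threading.Condition()

def signal_done() -> None:
    global done
    with cond:           # mutate the flag only while holding the lock
        done = True
        cond.notify()    # wake a thread blocked in wait()/wait_for()

def wait_done(timeout: float) -> bool:
    with cond:
        # wait_for re-checks the predicate, so a notify() that fired
        # before the waiter arrived is not lost
        return cond.wait_for(lambda: done, timeout=timeout)

Checking the flag under the same lock is what makes the hand-off deterministic no matter which thread reaches the rendezvous first.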
@@ -159,15 +161,21 @@ def _th_partition_size_changes(self, table_key: TableKeyImpl, table_location_key
             return
 
         while self.subscriptions_enabled_for_test and self.partitions_size_subscriptions[table_location_key]:
-            pa_table = self.partitions[table_location_key]
-            rbs = pa_table.to_batches()
-            rbs.append(pa_table.to_batches()[0])
-            new_pa_table = pa.Table.from_batches(rbs)
-            self.partitions[table_location_key] = new_pa_table
-            size_cb(new_pa_table.num_rows)
             if self.sub_partition_size_fail_test:
-                failure_cb(Exception("table location size subscription failure"))
+                # give main test thread a chance to wait on the condition
+                time.sleep(0.1)
+                with self.size_sub_failure_cb_called_cond:
+                    failure_cb(Exception("table location size subscription failure"))
+                    self.is_size_sub_failure_cb_called = True
+                    self.size_sub_failure_cb_called_cond.notify()
                 return
-            time.sleep(0.1)
+            else:
+                pa_table = self.partitions[table_location_key]
+                rbs = pa_table.to_batches()
+                rbs.append(pa_table.to_batches()[0])
+                new_pa_table = pa.Table.from_batches(rbs)
+                self.partitions[table_location_key] = new_pa_table
+                size_cb(new_pa_table.num_rows)
+                time.sleep(0.1)
 
     def subscribe_to_table_location_size(self, table_key: TableKeyImpl,
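Why the backend sets the flag rather than relying on notify() alone: a Condition.notify() issued while no thread is waiting is simply dropped. A small sketch (illustrative only, not from the PR) of the lost-wakeup race the boolean guard closes:

import threading
import time

cond = threading.Condition()

def early_signaler() -> None:
    with cond:
        cond.notify()    # nobody is waiting yet, so this wakeup is lost

threading.Thread(target=early_signaler).start()
time.sleep(0.2)          # the waiter arrives after the notify has fired
with cond:
    woke = cond.wait(timeout=1.0)
print(woke)              # False: the notification was missed

Because the test checks is_size_sub_failure_cb_called before waiting, the backend's 0.1s sleep is only a scheduling courtesy, not a correctness requirement.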
@@ -347,7 +355,7 @@ def test_partition_sub_failure(self):
         table = data_service.make_table(TableKeyImpl("test"), refreshing=True)
         with self.assertRaises(Exception) as cm:
             # failure_cb will be called in the background thread after 2 PUG cycles, 3 seconds timeout should be enough
-            self.wait_ticking_table_update(table, 600, 3)
+            self.wait_ticking_table_update(table, 1024, 3)
         self.assertTrue(table.is_failed)
 
     def test_partition_size_sub_failure(self):
@@ -357,9 +365,23 @@ def test_partition_size_sub_failure(self):
         data_service = TableDataService(backend)
         backend.sub_partition_size_fail_test = True
         table = data_service.make_table(TableKeyImpl("test"), refreshing=True)
+
+        # wait for the location/size subscription to be established
+        self.wait_ticking_table_update(table, 2, 1)
+
+        with backend.size_sub_failure_cb_called_cond:
+            # the test backend will trigger a size subscription failure
+            if not backend.is_size_sub_failure_cb_called:
+                if not backend.size_sub_failure_cb_called_cond.wait(timeout=5):
+                    self.fail("size subscription failure callback was not called in 5s")
+            else:
+                # size subscription failure callback was already called
+                pass
+
         with self.assertRaises(Exception) as cm:
-            # failure_cb will be called in the background thread after 2 PUG cycles, 3 seconds timeout should be enough
-            self.wait_ticking_table_update(table, 600, 3)
+            # for a real PUG with a 1s interval, the failure is buffered after the roots are
+            # processed on one cycle; it won't be delivered until the next cycle
+            self.wait_ticking_table_update(table, 1024, 2)
+
         self.assertTrue(table.is_failed)

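Design note: the flag-plus-Condition pair is exactly what threading.Event packages up. A hypothetical equivalent of the same hand-off (not what the PR does, shown only for comparison):

import threading

failure_cb_called = threading.Event()

# background thread: replaces the flag assignment plus notify()
failure_cb_called.set()

# main test thread: replaces the flag check plus cond.wait(timeout=5);
# Event.wait() returns False on timeout, just like Condition.wait()
if not failure_cb_called.wait(timeout=5):
    raise AssertionError("size subscription failure callback was not called in 5s")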