17 | 17 | from unittest.mock import MagicMock, patch
18 | 18 |
19 | 19 | import contextlib
| 20 | +import datetime
20 | 21 | import os
21 | 22 | import pytest
22 | 23 | import random
@@ -1287,6 +1288,75 @@ def test_collect_binlogs_to_purge():
1287 | 1288 | log.info.assert_any_call("Binlog %s has been replicated to all servers, purging", 3)
1288 | 1289 |
1289 | 1290 |
| 1291 | +def test_periodic_backup_based_on_exceeded_intervals(time_machine, master_controller) -> None:
| 1292 | +    # pylint: disable=protected-access
| 1293 | +    time_machine.move_to("2023-01-02T18:00:00")
| 1294 | +
| 1295 | +    # By default backup_hour = 3, backup_interval_minutes = 1440
| 1296 | +    m_controller, master = master_controller
| 1297 | +
| 1298 | +    m_controller.switch_to_active_mode()
| 1299 | +    m_controller.start()
| 1300 | +
| 1301 | +    def streaming_binlogs(controller: Controller, expected_completed_backups: int):
| 1302 | +        assert controller.backup_streams
| 1303 | +        assert controller.backup_streams[0].active_phase == BackupStream.ActivePhase.binlog
| 1304 | +
| 1305 | +        complete_backups = [backup for backup in controller.state["backups"] if backup["completed_at"]]
| 1306 | +        assert len(complete_backups) == expected_completed_backups
| 1307 | +
| 1308 | +    def flush_binlogs():
| 1309 | +        with mysql_cursor(**master.connect_options) as cursor:
| 1310 | +            cursor.execute("FLUSH BINARY LOGS")
| 1311 | +
| 1312 | +    # generate some binlog data for the first backup
| 1313 | +    flush_binlogs()
| 1314 | +    # the first backup, normalized to 2023-01-02 03:00, should be created at 18:00 (the time the service was started)
| 1315 | +    while_asserts(lambda: streaming_binlogs(m_controller, 1), timeout=10)
| 1316 | +
| 1317 | +    # generate more data for the second backup
| 1318 | +    flush_binlogs()
| 1319 | +
| 1320 | +    # the second backup would ideally be taken at 2023-01-03 03:00, but the "half of interval" rule
| 1321 | +    # (at least 12 hours between the most recently scheduled backup and the current time) applies,
| 1322 | +    # so it should actually be scheduled at 2023-01-03 06:00
| 1323 | +    time_machine.move_to("2023-01-03T03:00:00+00:00")
| 1324 | +
| 1325 | +    expected_normalized_time = datetime.datetime(2023, 1, 3, 3, tzinfo=datetime.timezone.utc)
| 1326 | +    assert m_controller._current_normalized_backup_timestamp() == expected_normalized_time.isoformat()
| 1327 | +
| 1328 | +    # no new backup should be scheduled
| 1329 | +    time.sleep(1)
| 1330 | +    min_created_at = datetime.datetime(2023, 1, 3, 3, tzinfo=datetime.timezone.utc).timestamp()
| 1331 | +    assert not any(bs.created_at >= min_created_at for bs in m_controller.backup_streams)
| 1332 | +
| 1333 | +    time_machine.move_to("2023-01-03T06:00:00+00:00")
| 1334 | +    while_asserts(lambda: streaming_binlogs(m_controller, 2), timeout=10)
| 1335 | +
| 1336 | +    # generate more data for the third backup
| 1337 | +    flush_binlogs()
| 1338 | +
| 1339 | +    # after the second backup, the next one would be scheduled at 2023-01-04 03:00:00, but let's change
| 1340 | +    # backup_interval_minutes to 2880 (48 hours)
| 1341 | +    m_controller.backup_settings["backup_interval_minutes"] = 2880
| 1342 | +
| 1343 | +    time_machine.move_to("2023-01-04T06:00:00+00:00")
| 1344 | +
| 1345 | +    expected_normalized_time = datetime.datetime(2023, 1, 3, 3, tzinfo=datetime.timezone.utc)
| 1346 | +    assert m_controller._current_normalized_backup_timestamp() == expected_normalized_time.isoformat()
| 1347 | +
| 1348 | +    time.sleep(1)
| 1349 | +    # no new backup should be scheduled
| 1350 | +    min_created_at = datetime.datetime(2023, 1, 4, 3, tzinfo=datetime.timezone.utc).timestamp()
| 1351 | +    assert not any(bs.created_at >= min_created_at for bs in m_controller.backup_streams)
| 1352 | +
| 1353 | +    time_machine.move_to("2023-01-05T03:00:00+00:00")
| 1354 | +    expected_normalized_time = datetime.datetime(2023, 1, 5, 3, tzinfo=datetime.timezone.utc)
| 1355 | +
| 1356 | +    assert m_controller._current_normalized_backup_timestamp() == expected_normalized_time.isoformat()
| 1357 | +    while_asserts(lambda: streaming_binlogs(m_controller, 3), timeout=10)
| 1358 | +
| 1359 | +
1290 | 1360 | @patch.object(RestoreCoordinator, "MAX_BASEBACKUP_ERRORS", 2)
1291 | 1361 | @patch.object(BasebackupRestoreOperation, "restore_backup", side_effect=Exception("failed restoring basebackup"))
1292 | 1362 | def test_backup_marked_as_broken_after_failed_restoration(
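
A note on the scheduling logic the new test exercises: a periodic backup starts only once the current normalized backup time (derived from backup_hour and backup_interval_minutes) has been reached, and the "half of interval" rule additionally requires that at least half of the configured interval has passed since the previous backup was started. The snippet below is a minimal standalone sketch of that rule for readers of this diff; should_start_periodic_backup and its parameters are hypothetical names for illustration, not the controller's actual API.

import datetime
from typing import Optional


def should_start_periodic_backup(
    now: datetime.datetime,
    normalized_backup_time: datetime.datetime,
    previous_backup_started_at: Optional[datetime.datetime],
    backup_interval_minutes: int,
) -> bool:
    # Hypothetical helper illustrating the rule the test asserts on.
    if previous_backup_started_at is None:
        # No backup exists yet: start one immediately (the 18:00 case above).
        return True
    if now < normalized_backup_time:
        # The scheduled slot has not been reached yet.
        return False
    # "Half of interval" rule: skip if less than half of the configured
    # interval has passed since the previous backup was started.
    half_interval = datetime.timedelta(minutes=backup_interval_minutes / 2)
    return now - previous_backup_started_at >= half_interval


utc = datetime.timezone.utc
prev = datetime.datetime(2023, 1, 2, 18, tzinfo=utc)  # first backup started at 18:00
slot = datetime.datetime(2023, 1, 3, 3, tzinfo=utc)   # normalized time of the next slot

# 2023-01-03 03:00: only 9 hours since the previous backup, so no backup yet
assert not should_start_periodic_backup(datetime.datetime(2023, 1, 3, 3, tzinfo=utc), slot, prev, 1440)
# 2023-01-03 06:00: 12 hours have elapsed, so the second backup can start
assert should_start_periodic_backup(datetime.datetime(2023, 1, 3, 6, tzinfo=utc), slot, prev, 1440)

With the default 1440-minute interval this reproduces the timeline asserted in the test: no backup at 2023-01-03 03:00 and a second backup at 2023-01-03 06:00.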