@@ -1,4 +1,5 @@
 from datetime import timedelta
+from unittest import mock
 from unittest.mock import patch
 
 from django.utils import timezone
@@ -10,6 +11,7 @@
     CheckInStatus,
     Monitor,
     MonitorCheckIn,
+    MonitorEnvBrokenDetection,
     MonitorEnvironment,
     MonitorIncident,
     MonitorStatus,
@@ -240,3 +242,72 @@ def test_mark_ok_recovery_threshold(self, mock_produce_occurrence_to_kafka):
                 "new_substatus": None,
             },
         ) == dict(status_change)
+
+    @mock.patch("sentry.analytics.record")
+    def test_mark_ok_broken_recovery(self, mock_record):
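+        """
+        A monitor environment whose incident was detected as broken recovers
+        on a successful check-in and records a recovery analytics event.
+        """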
+        now = timezone.now().replace(second=0, microsecond=0)
+
+        monitor = Monitor.objects.create(
+            name="test monitor",
+            organization_id=self.organization.id,
+            project_id=self.project.id,
+            type=MonitorType.CRON_JOB,
+            config={
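+                # every-minute schedule; the None values fall back to the
+                # monitor defaults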
+                "schedule": "* * * * *",
+                "schedule_type": ScheduleType.CRONTAB,
+                "max_runtime": None,
+                "checkin_margin": None,
+                "recovery_threshold": None,
+            },
+        )
+
+        # Start with monitor in an ERROR state and broken detection
+        monitor_environment = MonitorEnvironment.objects.create(
+            monitor=monitor,
+            environment_id=self.environment.id,
+            status=MonitorStatus.ERROR,
+            last_checkin=now - timedelta(minutes=1),
+            next_checkin=now,
+        )
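+        # the failing check-in is dated two weeks back, giving the incident a
+        # long-lived history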
+        checkin = MonitorCheckIn.objects.create(
+            monitor=monitor,
+            monitor_environment=monitor_environment,
+            project_id=self.project.id,
+            status=CheckInStatus.ERROR,
+            date_added=timezone.now() - timedelta(days=14),
+        )
+        incident = MonitorIncident.objects.create(
+            monitor=monitor,
+            monitor_environment=monitor_environment,
+            starting_checkin=checkin,
+            starting_timestamp=checkin.date_added,
+        )
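+        # mark the incident's environment as detected-broken; recovery should
+        # record the analytics event asserted at the end of the test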
+        MonitorEnvBrokenDetection.objects.create(
+            monitor_incident=incident,
+        )
+
+        # OK checkin comes in
+        success_checkin = MonitorCheckIn.objects.create(
+            monitor=monitor,
+            monitor_environment=monitor_environment,
+            project_id=self.project.id,
+            status=CheckInStatus.OK,
+            date_added=now,
+        )
+        mark_ok(success_checkin, ts=now)
+
+        # Monitor has recovered to OK with updated upcoming timestamps
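+        # (with the "* * * * *" schedule the next check-in is expected one
+        # minute out; the default margin allows one more minute)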
+        monitor_environment.refresh_from_db()
+        assert monitor_environment.status == MonitorStatus.OK
+        assert monitor_environment.next_checkin == now + timedelta(minutes=1)
+        assert monitor_environment.next_checkin_latest == now + timedelta(minutes=2)
+        assert monitor_environment.last_checkin == now
+
+        # We recorded an analytics event
+        mock_record.assert_called_with(
+            "cron_monitor_broken_status.recovery",
+            organization_id=self.organization.id,
+            project_id=self.project.id,
+            monitor_id=monitor.id,
+            monitor_env_id=monitor_environment.id,
+        )