from sentry.issues.grouptype import GroupType
from sentry.issues.issue_occurrence import IssueEvidence, IssueOccurrence
-from sentry.issues.status_change_message import StatusChangeMessage
from sentry.types.actor import Actor
from sentry.workflow_engine.models import Condition, DataConditionGroup, DataPacket, Detector
-from sentry.workflow_engine.types import DetectorGroupKey, DetectorPriorityLevel
+from sentry.workflow_engine.processors.data_condition_group import ProcessedDataConditionGroup
+from sentry.workflow_engine.types import (
+    DetectorEvaluationResult,
+    DetectorGroupKey,
+    DetectorPriorityLevel,
+)

logger = logging.getLogger(__name__)

DataPacketType = TypeVar("DataPacketType")
DataPacketEvaluationType = TypeVar("DataPacketEvaluationType")
+
+# TODO - get more info about how this is used in issue platform
EventData = dict[str, Any]

@@ -35,14 +41,14 @@ class EvidenceData(Generic[DataPacketEvaluationType]):
class DetectorOccurrence:
    issue_title: str
    subtitle: str
-    resource_id: str | None = None
    evidence_data: Mapping[str, Any] = dataclasses.field(default_factory=dict)
    evidence_display: Sequence[IssueEvidence] = dataclasses.field(default_factory=list)
    type: type[GroupType]
    level: str
    culprit: str
-    priority: int | None = None
+    resource_id: str | None = None
    assignee: Actor | None = None
+    priority: DetectorPriorityLevel | None = None

    def to_issue_occurrence(
        self,
@@ -73,20 +79,6 @@ def to_issue_occurrence(
        )


-@dataclasses.dataclass(frozen=True)
-class DetectorEvaluationResult:
-    # TODO - Should group key live at this level?
-    group_key: DetectorGroupKey
-    # TODO: Are these actually necessary? We're going to produce the occurrence in the detector, so we probably don't
-    # need to know the other results externally
-    is_triggered: bool
-    priority: DetectorPriorityLevel
-    # TODO: This is only temporarily optional. We should always have a value here if returning a result
-    result: IssueOccurrence | StatusChangeMessage | None = None
-    # Event data to supplement the `IssueOccurrence`, if passed.
-    event_data: dict[str, Any] | None = None
-
-
# TODO - DetectorHandler -> AbstractDetectorHandler? (then DetectorHandler is the base implementation)
class DetectorHandler(abc.ABC, Generic[DataPacketType, DataPacketEvaluationType]):
    def __init__(self, detector: Detector):
@@ -109,7 +101,7 @@ def __init__(self, detector: Detector):
    @abc.abstractmethod
    def evaluate(
        self, data_packet: DataPacket[DataPacketType]
-    ) -> dict[DetectorGroupKey, DetectorEvaluationResult]:
+    ) -> dict[DetectorGroupKey, DetectorEvaluationResult] | None:
        """
        This method is used to evaluate the data packet's value against the conditions on the detector.
        """
@@ -118,10 +110,9 @@ def evaluate(
    @abc.abstractmethod
    def create_occurrence(
        self,
-        value: DataPacketEvaluationType,
+        evaluation_result: ProcessedDataConditionGroup,
+        data_packet: DataPacket[DataPacketType],
        priority: DetectorPriorityLevel,
-        # data_packet: DataPacketType, # TODO - having access to all the data being evaluated seems good
-        # data_conditions: list[DataCondition], # TODO - list of the failing conditions might be nice
    ) -> tuple[DetectorOccurrence, EventData]:
        """
        This method provides the value that was evaluated against, the data packet that was
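For illustration only (not part of this change): a minimal sketch of a concrete handler written against the updated abstract signatures. The class name, the dict packet type, and MY_GROUP_TYPE are hypothetical placeholders, and a real handler would also need to implement any other abstract members of DetectorHandler that fall outside this hunk.

# Hypothetical example, not part of the diff above.
class ExampleThresholdDetectorHandler(DetectorHandler[dict, int]):
    def evaluate(
        self, data_packet: DataPacket[dict]
    ) -> dict[DetectorGroupKey, DetectorEvaluationResult] | None:
        # The widened return type lets a handler report "no state change" for a
        # packet by returning None; a real handler would evaluate the detector's
        # condition group here and build per-group results.
        return None

    def create_occurrence(
        self,
        evaluation_result: ProcessedDataConditionGroup,
        data_packet: DataPacket[dict],
        priority: DetectorPriorityLevel,
    ) -> tuple[DetectorOccurrence, EventData]:
        # Build the occurrence from the condition-group result, the packet that
        # produced it, and the resolved priority; the dict supplements the
        # eventual IssueOccurrence with event data.
        occurrence = DetectorOccurrence(
            issue_title="Threshold exceeded",
            subtitle="Evaluated value crossed the configured threshold",
            type=MY_GROUP_TYPE,  # placeholder for a concrete GroupType subclass
            level="error",
            culprit="example-threshold-detector",
            priority=priority,
        )
        return occurrence, {}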