diff --git a/src/sentry/workflow_engine/handlers/detector/base.py b/src/sentry/workflow_engine/handlers/detector/base.py
index f33787eff12c33..be3f27cdd98cd2 100644
--- a/src/sentry/workflow_engine/handlers/detector/base.py
+++ b/src/sentry/workflow_engine/handlers/detector/base.py
@@ -34,6 +34,7 @@ class EvidenceData(Generic[DataPacketEvaluationType]):
     detector_id: int
     data_packet_source_id: int
     conditions: list[dict[str, Any]]
+    data_sources: list[dict[str, Any]] = dataclasses.field(default_factory=list, kw_only=True)
 
 
 @dataclasses.dataclass(frozen=True, kw_only=True)
diff --git a/src/sentry/workflow_engine/handlers/detector/stateful.py b/src/sentry/workflow_engine/handlers/detector/stateful.py
index 230e3ccbbc0524..48cc3110443576 100644
--- a/src/sentry/workflow_engine/handlers/detector/stateful.py
+++ b/src/sentry/workflow_engine/handlers/detector/stateful.py
@@ -9,6 +9,7 @@
 from django.db.models import Q
 from sentry_redis_tools.retrying_cluster import RetryingRedisCluster
 
+from sentry.api.serializers import serialize
 from sentry.issues.issue_occurrence import IssueOccurrence
 from sentry.issues.status_change_message import StatusChangeMessage
 from sentry.models.group import GroupStatus
@@ -21,7 +22,7 @@
     EventData,
     GroupedDetectorEvaluationResult,
 )
-from sentry.workflow_engine.models import DataPacket, Detector, DetectorState
+from sentry.workflow_engine.models import DataPacket, DataSource, Detector, DetectorState
 from sentry.workflow_engine.processors.data_condition_group import (
     ProcessedDataConditionGroup,
     process_data_condition_group,
@@ -353,6 +354,29 @@ def build_detector_evidence_data(
         """
         return {}
 
+    def _build_evidence_data_sources(
+        self, data_packet: DataPacket[DataPacketType]
+    ) -> list[dict[str, Any]]:
+        try:
+            data_sources = list(
+                DataSource.objects.filter(detectors=self.detector, source_id=data_packet.source_id)
+            )
+            if not data_sources:
+                logger.warning(
+                    "Matching data source not found for detector while generating occurrence evidence data",
+                    extra={
+                        "detector_id": self.detector.id,
+                        "data_packet_source_id": data_packet.source_id,
+                    },
+                )
+                return []
+            return serialize(data_sources)
+        except Exception:
+            logger.exception(
+                "Failed to serialize data source definition when building workflow engine evidence data"
+            )
+            return []
+
     def _build_workflow_engine_evidence_data(
         self,
         evaluation_result: ProcessedDataConditionGroup,
@@ -363,15 +387,18 @@ def _build_workflow_engine_evidence_data(
         Build the workflow engine specific evidence data.
         This is data that is common to all detectors.
         """
-        return {
+        base: dict[str, Any] = {
             "detector_id": self.detector.id,
             "value": evaluation_value,
             "data_packet_source_id": str(data_packet.source_id),
             "conditions": [
                 result.condition.get_snapshot() for result in evaluation_result.condition_results
             ],
+            "data_sources": self._build_evidence_data_sources(data_packet),
         }
 
+        return base
+
     def evaluate_impl(
         self, data_packet: DataPacket[DataPacketType]
     ) -> GroupedDetectorEvaluationResult:
diff --git a/tests/sentry/incidents/test_metric_issue_detector_handler.py b/tests/sentry/incidents/test_metric_issue_detector_handler.py
index 435434ba388025..a5e69543a56494 100644
--- a/tests/sentry/incidents/test_metric_issue_detector_handler.py
+++ b/tests/sentry/incidents/test_metric_issue_detector_handler.py
@@ -24,6 +24,7 @@ def generate_evidence_data(
         detector_trigger: DataCondition,
         extra_trigger: DataCondition | None = None,
     ):
+        self.query_subscription.refresh_from_db()
 
         conditions = [
             {
@@ -50,6 +51,29 @@ def generate_evidence_data(
             "alert_id": self.alert_rule.id,
             "data_packet_source_id": str(self.query_subscription.id),
             "conditions": conditions,
+            "data_sources": [
+                {
+                    "id": str(self.data_source.id),
+                    "organizationId": str(self.organization.id),
+                    "type": self.data_source.type,
+                    "sourceId": str(self.query_subscription.id),
+                    "queryObj": {
+                        "id": str(self.query_subscription.id),
+                        "status": self.query_subscription.status,
+                        "subscription": self.query_subscription.subscription_id,
+                        "snubaQuery": {
+                            "id": str(self.snuba_query.id),
+                            "dataset": self.snuba_query.dataset,
+                            "query": self.snuba_query.query,
+                            "aggregate": self.snuba_query.aggregate,
+                            "timeWindow": self.snuba_query.time_window,
+                            "environment": self.environment.name,
+                            "eventTypes": ["error"],
+                            "extrapolationMode": "unknown",
+                        },
+                    },
+                }
+            ],
         }
         return evidence_data
diff --git a/tests/sentry/notifications/notification_action/test_metric_alert_registry_handlers.py b/tests/sentry/notifications/notification_action/test_metric_alert_registry_handlers.py
index 9712b2b38ef9b5..b75c0fda9b9d61 100644
--- a/tests/sentry/notifications/notification_action/test_metric_alert_registry_handlers.py
+++ b/tests/sentry/notifications/notification_action/test_metric_alert_registry_handlers.py
@@ -101,6 +101,7 @@ def create_models(self):
                     "condition_result": DetectorPriorityLevel.OK.value,
                 },
             ],
+            data_sources=[],
             alert_id=self.alert_rule.id,
         )
 
@@ -126,6 +127,7 @@ def create_models(self):
                     "condition_result": DetectorPriorityLevel.HIGH.value,
                 },
             ],
+            data_sources=[],
             alert_id=self.alert_rule.id,
         )
         self.group, self.event, self.group_event = self.create_group_event(