Commit a47440d

backfill detectorgroup for metric issues
1 parent 2c878a7 commit a47440d

2 files changed (+248, −0 lines)
Lines changed: 157 additions & 0 deletions
@@ -0,0 +1,157 @@
# Generated by Django 5.2.1

import logging
from collections.abc import Sequence
from datetime import datetime
from enum import Enum
from typing import Any

from django.db import migrations
from django.db.backends.base.schema import BaseDatabaseSchemaEditor
from django.db.migrations.state import StateApps
from snuba_sdk import Column, Condition, Op

from sentry import eventstore
from sentry.new_migrations.migrations import CheckedMigration
from sentry.snuba.dataset import Dataset
from sentry.utils.query import RangeQuerySetWrapper

logger = logging.getLogger(__name__)


class EventOrdering(Enum):
    LATEST = ["project_id", "-timestamp", "-event_id"]
    OLDEST = ["project_id", "timestamp", "event_id"]
    RECOMMENDED = [
        "-replay.id",
        "-trace.sampled",
        "num_processing_errors",
        "-profile.id",
        "-timestamp",
        "-event_id",
    ]


def get_oldest_or_latest_event(
    group: Any,
    ordering: EventOrdering,
    conditions: Sequence[Condition] | None = None,
    start: datetime | None = None,
    end: datetime | None = None,
) -> Any:
    dataset = Dataset.IssuePlatform

    all_conditions = [
        Condition(Column("project_id"), Op.IN, [group.project.id]),
        Condition(Column("group_id"), Op.IN, [group.id]),
    ]

    if conditions:
        all_conditions.extend(conditions)

    events = eventstore.backend.get_events_snql(
        organization_id=group.project.organization_id,
        group_id=group.id,
        start=start,
        end=end,
        conditions=all_conditions,
        limit=1,
        orderby=ordering.value,
        referrer="Group.get_latest",
        dataset=dataset,
        tenant_ids={"organization_id": group.project.organization_id},
    )

    if events:
        return events[0].for_group(group)

    return None


def backfill_metric_issue_detectorgroup(
    apps: StateApps, schema_editor: BaseDatabaseSchemaEditor
) -> None:
    """
    Backfill the DetectorGroup table for metric issues.
    """
    Group = apps.get_model("sentry", "Group")
    DetectorGroup = apps.get_model("workflow_engine", "DetectorGroup")
    Detector = apps.get_model("workflow_engine", "Detector")

    for group in RangeQuerySetWrapper(
        Group.objects.filter(type=8001, detectorgroup__isnull=True)
    ):  # metric issues
        # figure out the detector
        latest_event = get_oldest_or_latest_event(group, EventOrdering.LATEST)
        if not latest_event:
            logger.info("No latest event found for group", extra={"group_id": group.id})
            continue

        occurrence = latest_event.occurrence
        if not occurrence:
            logger.info(
                "No occurrence found for latest event", extra={"event_id": latest_event.event_id}
            )
            continue

        detector_id = occurrence.evidence_data.get("detector_id")
        if detector_id is None:
            logger.info(
                "No detector id found for occurrence", extra={"occurrence_id": occurrence.id}
            )
            continue

        # try to fetch detector
        detector = Detector.objects.filter(id=detector_id).first()
        if detector is None:
            DetectorGroup.objects.create(
                group_id=group.id,
                detector_id=None,
            )
            logger.info(
                "Creating DetectorGroup with null detector",
                extra={"group_id": group.id, "detector_id": detector_id},
            )
            continue

        DetectorGroup.objects.create(
            group_id=group.id,
            detector_id=detector.id,
        )
        logger.info(
            "Creating DetectorGroup",
            extra={"group_id": group.id, "detector_id": detector_id},
        )


class Migration(CheckedMigration):
    # This flag is used to mark that a migration shouldn't be automatically run in production.
    # This should only be used for operations where it's safe to run the migration after your
    # code has deployed. So this should not be used for most operations that alter the schema
    # of a table.
    # Here are some things that make sense to mark as post deployment:
    # - Large data migrations. Typically we want these to be run manually so that they can be
    #   monitored and not block the deploy for a long period of time while they run.
    # - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to
    #   run this outside deployments so that we don't block them. Note that while adding an index
    #   is a schema change, it's completely safe to run the operation after the code has deployed.
    # Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment

    is_post_deployment = True

    dependencies = [
        ("sentry", "1003_group_history_prev_history_safe_removal"),
        ("workflow_engine", "0098_detectorgroup_detector_set_null"),
    ]

    operations = [
        migrations.RunPython(
            backfill_metric_issue_detectorgroup,
            migrations.RunPython.noop,
            hints={
                "tables": [
                    "workflow_engine_detectorgroup",
                    "sentry_group",
                ]
            },
        ),
    ]
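Because `is_post_deployment = True`, this backfill is applied manually after the code has shipped rather than during deploy. Below is a minimal sketch, not part of this commit, of applying the migration and spot-checking the result from a Django shell; the `call_command` invocation and the direct `Group` query are illustrative assumptions.

# Illustrative sketch only (not part of this commit): apply the backfill manually and verify it.
from django.core.management import call_command

from sentry.models.group import Group

# Apply workflow_engine migrations up to and including the backfill
# (see https://develop.sentry.dev/database-migrations/#migration-deployment).
call_command("migrate", "workflow_engine", "0099_backfill_metric_issue_detectorgroup")

# Metric issue groups (type 8001) that still have no DetectorGroup row afterwards;
# these correspond to groups whose latest event, occurrence, or detector id could not be resolved.
remaining = Group.objects.filter(type=8001, detectorgroup__isnull=True).count()
print(f"Metric issue groups still missing a DetectorGroup: {remaining}")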
Lines changed: 91 additions & 0 deletions
@@ -0,0 +1,91 @@
from sentry.incidents.grouptype import MetricIssue
from sentry.incidents.models.alert_rule import AlertRuleDetectionType
from sentry.issues.ingest import save_issue_occurrence
from sentry.testutils.cases import TestMigrations
from sentry.testutils.helpers.datetime import before_now
from sentry.workflow_engine.models import Detector, DetectorGroup
from tests.sentry.workflow_engine.test_base import BaseWorkflowTest


class BackfillMetricIssueDetectorGroupTest(TestMigrations, BaseWorkflowTest):
    migrate_from = "0098_detectorgroup_detector_set_null"
    migrate_to = "0099_backfill_metric_issue_detectorgroup"
    app = "workflow_engine"

    def setup_initial_state(self) -> None:
        self.org = self.create_organization(name="test-org")
        self.project = self.create_project(organization=self.org)

        self.detector = Detector.objects.create(
            project=self.project,
            name="Test Detector",
            type=MetricIssue.slug,
            config={"detection_type": AlertRuleDetectionType.STATIC.value},
        )

        occurrence_data = self.build_occurrence_data(
            event_id=self.event.event_id,
            project_id=self.project.id,
            fingerprint=[f"detector-{self.detector.id}"],
            evidence_data={"detector_id": self.detector.id},
            type=MetricIssue.type_id,
        )

        self.occurrence, group_info = save_issue_occurrence(occurrence_data, self.event)
        assert group_info is not None
        self.metric_issue = group_info.group

        event = self.store_event(
            data={
                "event_id": "b" * 32,
                "timestamp": before_now(seconds=1).isoformat(),
            },
            project_id=self.project.id,
        )
        occurrence_data = self.build_occurrence_data(
            event_id=event.event_id,
            project_id=self.project.id,
            fingerprint=[f"detector-{123}"],
            evidence_data={"detector_id": 123},
            type=MetricIssue.type_id,
        )

        _, group_info = save_issue_occurrence(occurrence_data, event)
        assert group_info is not None
        self.metric_issue_deleted_detector = group_info.group

        self.metric_issue_no_occurrence = self.create_group(
            project=self.project, type=MetricIssue.type_id
        )

        self.metric_issue_existing_detectorgroup = self.create_group(
            project=self.project, type=MetricIssue.type_id
        )
        self.detector2 = Detector.objects.create(
            project=self.project,
            name="Test Detector 2",
            type=MetricIssue.slug,
            config={"detection_type": AlertRuleDetectionType.STATIC.value},
        )
        DetectorGroup.objects.all().delete()
        DetectorGroup.objects.create(
            group=self.metric_issue_existing_detectorgroup,
            detector=self.detector2,
        )

    def test_migration(self) -> None:
        assert DetectorGroup.objects.filter(
            group=self.metric_issue, detector=self.detector
        ).exists()

        assert DetectorGroup.objects.filter(
            group=self.metric_issue_deleted_detector, detector=None
        ).exists()

        assert not DetectorGroup.objects.filter(
            group=self.metric_issue_no_occurrence
        ).exists()  # does not exist because we should figure out what to do with this

        assert DetectorGroup.objects.filter(
            group=self.metric_issue_existing_detectorgroup, detector=self.detector2
        ).exists()
