# Generated by Django 5.2.1
import logging
from collections.abc import Sequence
from datetime import datetime
from enum import Enum
from typing import Any

from django.db import migrations
from django.db.backends.base.schema import BaseDatabaseSchemaEditor
from django.db.migrations.state import StateApps
from snuba_sdk import Column, Condition, Op

from sentry import eventstore
from sentry.new_migrations.migrations import CheckedMigration
from sentry.snuba.dataset import Dataset
from sentry.utils.query import RangeQuerySetWrapper

logger = logging.getLogger(__name__)


class EventOrdering(Enum):
    """Snuba sort orders for picking a single representative event of a group."""

    LATEST = ["project_id", "-timestamp", "-event_id"]
    OLDEST = ["project_id", "timestamp", "event_id"]
    RECOMMENDED = [
        "-replay.id",
        "-trace.sampled",
        "num_processing_errors",
        "-profile.id",
        "-timestamp",
        "-event_id",
    ]

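# NOTE: the backfill below only uses EventOrdering.LATEST. The enum and the
# helper that follows look like inlined copies of Sentry's event-fetching
# logic, kept local so the migration does not depend on model code that may
# change after it ships.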

def get_oldest_or_latest_event(
    group: Any,
    ordering: EventOrdering,
    conditions: Sequence[Condition] | None = None,
    start: datetime | None = None,
    end: datetime | None = None,
) -> Any:
    """Fetch a single event for `group` from Snuba, sorted by `ordering`."""
    dataset = Dataset.IssuePlatform

    all_conditions = [
        Condition(Column("project_id"), Op.IN, [group.project.id]),
        Condition(Column("group_id"), Op.IN, [group.id]),
    ]

    if conditions:
        all_conditions.extend(conditions)

    events = eventstore.backend.get_events_snql(
        organization_id=group.project.organization_id,
        group_id=group.id,
        start=start,
        end=end,
        conditions=all_conditions,
        limit=1,
        orderby=ordering.value,
        referrer="Group.get_latest",
        dataset=dataset,
        tenant_ids={"organization_id": group.project.organization_id},
    )

    if events:
        return events[0].for_group(group)

    return None


def backfill_metric_issue_detectorgroup(
    apps: StateApps, schema_editor: BaseDatabaseSchemaEditor
) -> None:
    """
    Backfill the DetectorGroup table for metric issues that do not yet have one.
    """
    Group = apps.get_model("sentry", "Group")
    DetectorGroup = apps.get_model("workflow_engine", "DetectorGroup")
    Detector = apps.get_model("workflow_engine", "Detector")

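    # RangeQuerySetWrapper pages through the queryset in primary-key order, so
    # the backfill never loads every matching group into memory at once.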
    for group in RangeQuerySetWrapper(
        Group.objects.filter(type=8001, detectorgroup__isnull=True)  # metric issues
    ):
        # Work out which detector created this group from its latest event.
        latest_event = get_oldest_or_latest_event(group, EventOrdering.LATEST)
        if not latest_event:
            logger.info("No latest event found for group", extra={"group_id": group.id})
            continue

        occurrence = latest_event.occurrence
        if not occurrence:
            logger.info(
                "No occurrence found for latest event", extra={"event_id": latest_event.event_id}
            )
            continue

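        # Metric-issue occurrences record the id of the detector that created
        # them in their evidence_data payload.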
        detector_id = occurrence.evidence_data.get("detector_id")
        if detector_id is None:
            logger.info(
                "No detector id found for occurrence", extra={"occurrence_id": occurrence.id}
            )
            continue

        # The detector may have been deleted since the occurrence was written;
        # in that case record the association with a null detector.
        detector = Detector.objects.filter(id=detector_id).first()
        if detector is None:
            DetectorGroup.objects.create(
                group_id=group.id,
                detector_id=None,
            )
            logger.info(
                "Created DetectorGroup with null detector",
                extra={"group_id": group.id, "detector_id": detector_id},
            )
            continue

        DetectorGroup.objects.create(
            group_id=group.id,
            detector_id=detector.id,
        )
        logger.info(
            "Created DetectorGroup",
            extra={"group_id": group.id, "detector_id": detector_id},
        )


class Migration(CheckedMigration):
    # This flag is used to mark that a migration shouldn't be automatically run in production.
    # This should only be used for operations where it's safe to run the migration after your
    # code has deployed. So this should not be used for most operations that alter the schema
    # of a table.
    # Here are some things that make sense to mark as post deployment:
    # - Large data migrations. Typically we want these to be run manually so that they can be
    #   monitored and not block the deploy for a long period of time while they run.
    # - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to
    #   run this outside deployments so that we don't block them. Note that while adding an index
    #   is a schema change, it's completely safe to run the operation after the code has deployed.
    # Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment

    is_post_deployment = True

    dependencies = [
        ("sentry", "1003_group_history_prev_history_safe_removal"),
        ("workflow_engine", "0098_detectorgroup_detector_set_null"),
    ]

    operations = [
        migrations.RunPython(
            backfill_metric_issue_detectorgroup,
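            # Reversing the migration is a no-op: any DetectorGroup rows
            # created by the backfill are left in place.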
            migrations.RunPython.noop,
            hints={
                "tables": [
                    "workflow_engine_detectorgroup",
                    "sentry_group",
                ]
            },
        ),
    ]