Skip to content

ref(onboarding): centralize project flags & signals #91921

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Open
wants to merge 11 commits into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
66 changes: 35 additions & 31 deletions src/sentry/event_manager.py
Original file line number Diff line number Diff line change
Expand Up @@ -105,16 +105,14 @@
from sentry.plugins.base import plugins
from sentry.quotas.base import index_data_category
from sentry.receivers.features import record_event_processed
from sentry.receivers.onboarding import (
record_first_insight_span,
record_first_transaction,
record_release_received,
)
from sentry.receivers.onboarding import record_release_received
from sentry.reprocessing2 import is_reprocessed_event
from sentry.seer.signed_seer_api import make_signed_seer_api_request
from sentry.signals import (
first_event_received,
first_event_with_minified_stack_trace_received,
first_insight_span_received,
first_transaction_received,
issue_unresolved,
)
from sentry.tasks.process_buffer import buffer_incr
Expand All @@ -136,6 +134,7 @@
from sentry.utils.outcomes import Outcome, track_outcome
from sentry.utils.performance_issues.performance_detection import detect_performance_problems
from sentry.utils.performance_issues.performance_problem import PerformanceProblem
from sentry.utils.projectflags import set_project_flag_and_signal
from sentry.utils.safe import get_path, safe_execute, setdefault_path, trim
from sentry.utils.sdk import set_span_data
from sentry.utils.tag_normalization import normalized_sdk_tag_from_event
Expand Down Expand Up @@ -220,27 +219,6 @@ def plugin_is_regression(group: Group, event: BaseEvent) -> bool:
return True


def get_project_insight_flag(project: Project, module: InsightModules):
    """Return the project's "has seen this insight module" flag value.

    Returns None for a module with no corresponding project flag (same as
    falling off the end of the original if/elif chain).
    """
    flag_attr_by_module = {
        InsightModules.HTTP: "has_insights_http",
        InsightModules.DB: "has_insights_db",
        InsightModules.ASSETS: "has_insights_assets",
        InsightModules.APP_START: "has_insights_app_start",
        InsightModules.SCREEN_LOAD: "has_insights_screen_load",
        InsightModules.VITAL: "has_insights_vitals",
        InsightModules.CACHE: "has_insights_caches",
        InsightModules.QUEUE: "has_insights_queues",
        InsightModules.LLM_MONITORING: "has_insights_llm_monitoring",
    }
    attr = flag_attr_by_module.get(module)
    # getattr only for a known module so unmatched modules still yield None
    # without touching project.flags.
    return getattr(project.flags, attr) if attr is not None else None


def has_pending_commit_resolution(group: Group) -> bool:
"""
Checks that the most recent commit that fixes a group has had a chance to release
Expand Down Expand Up @@ -569,8 +547,11 @@ def save_error_events(
has_event_minified_stack_trace(job["event"])
and not project.flags.has_minified_stack_trace
):
first_event_with_minified_stack_trace_received.send_robust(
project=project, event=job["event"], sender=Project
set_project_flag_and_signal(
project,
"has_minified_stack_trace",
first_event_with_minified_stack_trace_received,
event=job["event"],
)

if is_reprocessed:
Expand Down Expand Up @@ -2475,6 +2456,19 @@ def _detect_performance_problems(jobs: Sequence[Job], projects: ProjectsMapping)
)


# Maps each insight module to the name of the corresponding Project flag.
# The flag name is passed to set_project_flag_and_signal when the first
# span for that module is detected on a project.
# NOTE: some flag names are pluralized differently from the enum member
# (VITAL -> has_insights_vitals, CACHE -> has_insights_caches,
# QUEUE -> has_insights_queues) — these match the Project flag definitions.
INSIGHT_MODULE_TO_PROJECT_FLAG_NAME: dict[InsightModules, str] = {
    InsightModules.HTTP: "has_insights_http",
    InsightModules.DB: "has_insights_db",
    InsightModules.ASSETS: "has_insights_assets",
    InsightModules.APP_START: "has_insights_app_start",
    InsightModules.SCREEN_LOAD: "has_insights_screen_load",
    InsightModules.VITAL: "has_insights_vitals",
    InsightModules.CACHE: "has_insights_caches",
    InsightModules.QUEUE: "has_insights_queues",
    InsightModules.LLM_MONITORING: "has_insights_llm_monitoring",
}


@sentry_sdk.tracing.trace
def _record_transaction_info(
jobs: Sequence[Job], projects: ProjectsMapping, skip_send_first_transaction: bool
Expand All @@ -2490,12 +2484,22 @@ def _record_transaction_info(
record_event_processed(project, event)

if not skip_send_first_transaction:
record_first_transaction(project, event.datetime)
set_project_flag_and_signal(
project,
"has_transactions",
first_transaction_received,
event=event,
)

spans = job["data"]["spans"]
for module, is_module in INSIGHT_MODULE_FILTERS.items():
if not get_project_insight_flag(project, module) and is_module(spans):
record_first_insight_span(project, module)
if is_module(spans):
set_project_flag_and_signal(
project,
INSIGHT_MODULE_TO_PROJECT_FLAG_NAME[module],
first_insight_span_received,
module=module,
)

if job["release"]:
environment = job["data"].get("environment") or None  # coerce "" to None
Expand Down
14 changes: 4 additions & 10 deletions src/sentry/feedback/usecases/create_feedback.py
Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,7 @@
from sentry.types.group import GroupSubStatus
from sentry.utils import metrics
from sentry.utils.outcomes import Outcome, track_outcome
from sentry.utils.projectflags import set_project_flag_and_signal
from sentry.utils.safe import get_path

logger = logging.getLogger(__name__)
Expand Down Expand Up @@ -369,17 +370,10 @@ def create_feedback_issue(
validate_issue_platform_event_schema(event_fixed)

# Analytics
if not project.flags.has_feedbacks:
first_feedback_received.send_robust(project=project, sender=Project)
set_project_flag_and_signal(project, "has_feedbacks", first_feedback_received)

if (
source
in [
FeedbackCreationSource.NEW_FEEDBACK_ENVELOPE,
]
and not project.flags.has_new_feedbacks
):
first_new_feedback_received.send_robust(project=project, sender=Project)
if source == FeedbackCreationSource.NEW_FEEDBACK_ENVELOPE:
set_project_flag_and_signal(project, "has_new_feedbacks", first_new_feedback_received)

# Send to issue platform for processing.
produce_occurrence_to_kafka(
Expand Down
15 changes: 9 additions & 6 deletions src/sentry/monitors/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,25 +23,28 @@
from sentry.users.models.user import User
from sentry.utils.audit import create_audit_entry, create_system_audit_entry
from sentry.utils.auth import AuthenticatedHttpRequest
from sentry.utils.projectflags import set_project_flag_and_signal


def signal_first_checkin(project: Project, monitor: Monitor):
    """Record a project's first cron check-in.

    If the project has never seen a cron check-in, backfill the
    ``has_cron_monitors`` flag (for projects that already had monitors
    before flag tracking existed), then — once the surrounding transaction
    commits — set ``has_cron_checkins`` and fire
    ``first_cron_checkin_received``.
    """
    if not project.flags.has_cron_checkins:
        # Backfill users that already have cron monitors
        check_and_signal_first_monitor_created(project, None, False)
        transaction.on_commit(
            # BUG FIX: this is the cron check-in path, so the flag to set is
            # "has_cron_checkins" — "has_new_feedbacks" was a copy-paste
            # error from the feedback refactor and would both set the wrong
            # flag and leave has_cron_checkins permanently unset (firing the
            # signal on every check-in).
            lambda: set_project_flag_and_signal(
                project,
                "has_cron_checkins",
                first_cron_checkin_received,
                monitor_id=str(monitor.guid),
            ),
            router.db_for_write(Project),
        )


def check_and_signal_first_monitor_created(project: Project, user, from_upsert: bool):
    """Set ``has_cron_monitors`` and fire ``first_cron_monitor_created``
    the first time a cron monitor is created for *project*."""
    # Guard clause: nothing to do once the flag is already set.
    if project.flags.has_cron_monitors:
        return
    set_project_flag_and_signal(
        project,
        "has_cron_monitors",
        first_cron_monitor_created,
        user=user,
        from_upsert=from_upsert,
    )


def signal_monitor_created(project: Project, user, from_upsert: bool, monitor: Monitor, request):
Expand Down
4 changes: 2 additions & 2 deletions src/sentry/profiles/task.py
Original file line number Diff line number Diff line change
Expand Up @@ -53,6 +53,7 @@
from sentry.utils.kafka_config import get_kafka_producer_cluster_options, get_topic_definition
from sentry.utils.locking import UnableToAcquireLock
from sentry.utils.outcomes import Outcome, track_outcome
from sentry.utils.projectflags import set_project_flag_and_signal
from sentry.utils.sdk import set_span_data

REVERSE_DEVICE_CLASS = {next(iter(tags)): label for label, tags in DEVICE_CLASS.items()}
Expand Down Expand Up @@ -276,8 +277,7 @@ def process_profile_task(

if sampled:
with metrics.timer("process_profile.track_outcome.accepted"):
if not project.flags.has_profiles:
first_profile_received.send_robust(project=project, sender=Project)
set_project_flag_and_signal(project, "has_profiles", first_profile_received)
try:
if quotas.backend.should_emit_profile_duration_outcome(
organization=organization, profile=profile
Expand Down
102 changes: 22 additions & 80 deletions src/sentry/receivers/onboarding.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,6 @@
from django.utils import timezone as django_timezone

from sentry import analytics
from sentry.constants import InsightModules
from sentry.integrations.base import IntegrationDomain, get_integration_types
from sentry.integrations.services.integration import RpcIntegration, integration_service
from sentry.models.organization import Organization
Expand Down Expand Up @@ -180,21 +179,12 @@ def record_first_event(project, event, **kwargs):


@first_transaction_received.connect(weak=False, dispatch_uid="onboarding.record_first_transaction")
def _record_first_transaction(project, event, **kwargs):
return record_first_transaction(project, event.datetime, **kwargs)


def record_first_transaction(project, datetime, **kwargs):
if project.flags.has_transactions:
return

project.update(flags=F("flags").bitor(Project.flags.has_transactions))

def record_first_transaction(project, event, **kwargs):
OrganizationOnboardingTask.objects.record(
organization_id=project.organization_id,
task=OnboardingTask.FIRST_TRANSACTION,
status=OnboardingTaskStatus.COMPLETE,
date_completed=datetime,
date_completed=event.datetime,
)

analytics.record(
Expand All @@ -208,8 +198,6 @@ def record_first_transaction(project, datetime, **kwargs):

@first_profile_received.connect(weak=False, dispatch_uid="onboarding.record_first_profile")
def record_first_profile(project, **kwargs):
project.update(flags=F("flags").bitor(Project.flags.has_profiles))

analytics.record(
"first_profile.sent",
user_id=get_owner_id(project),
Expand Down Expand Up @@ -246,8 +234,6 @@ def record_first_replay(project, **kwargs):

@first_flag_received.connect(weak=False, dispatch_uid="onboarding.record_first_flag")
def record_first_flag(project, **kwargs):
project.update(flags=F("flags").bitor(Project.flags.has_flags))

analytics.record(
"first_flag.sent",
organization_id=project.organization_id,
Expand All @@ -258,8 +244,6 @@ def record_first_flag(project, **kwargs):

@first_feedback_received.connect(weak=False, dispatch_uid="onboarding.record_first_feedback")
def record_first_feedback(project, **kwargs):
project.update(flags=F("flags").bitor(Project.flags.has_feedbacks))

analytics.record(
"first_feedback.sent",
user_id=get_owner_id(project),
Expand All @@ -273,8 +257,6 @@ def record_first_feedback(project, **kwargs):
weak=False, dispatch_uid="onboarding.record_first_new_feedback"
)
def record_first_new_feedback(project, **kwargs):
project.update(flags=F("flags").bitor(Project.flags.has_new_feedbacks))

analytics.record(
"first_new_feedback.sent",
user_id=get_owner_id(project),
Expand All @@ -286,16 +268,13 @@ def record_first_new_feedback(project, **kwargs):

@first_cron_monitor_created.connect(weak=False, dispatch_uid="onboarding.record_first_cron_monitor")
def record_first_cron_monitor(project, user, from_upsert, **kwargs):
updated = project.update(flags=F("flags").bitor(Project.flags.has_cron_monitors))

if updated:
analytics.record(
"first_cron_monitor.created",
user_id=get_owner_id(project, user),
organization_id=project.organization_id,
project_id=project.id,
from_upsert=from_upsert,
)
analytics.record(
"first_cron_monitor.created",
user_id=get_owner_id(project, user),
organization_id=project.organization_id,
project_id=project.id,
from_upsert=from_upsert,
)


@cron_monitor_created.connect(weak=False, dispatch_uid="onboarding.record_cron_monitor_created")
Expand All @@ -313,8 +292,6 @@ def record_cron_monitor_created(project, user, from_upsert, **kwargs):
weak=False, dispatch_uid="onboarding.record_first_cron_checkin"
)
def record_first_cron_checkin(project, monitor_id, **kwargs):
project.update(flags=F("flags").bitor(Project.flags.has_cron_checkins))

analytics.record(
"first_cron_checkin.sent",
user_id=get_owner_id(project),
Expand All @@ -324,30 +301,10 @@ def record_first_cron_checkin(project, monitor_id, **kwargs):
)


@first_insight_span_received.connect(
weak=False, dispatch_uid="onboarding.record_first_insight_span"
)
def record_first_insight_span(project, module, **kwargs):
flag = None
if module == InsightModules.HTTP:
flag = Project.flags.has_insights_http
elif module == InsightModules.DB:
flag = Project.flags.has_insights_db
elif module == InsightModules.ASSETS:
flag = Project.flags.has_insights_assets
elif module == InsightModules.APP_START:
flag = Project.flags.has_insights_app_start
elif module == InsightModules.SCREEN_LOAD:
flag = Project.flags.has_insights_screen_load
elif module == InsightModules.VITAL:
flag = Project.flags.has_insights_vitals
elif module == InsightModules.CACHE:
flag = Project.flags.has_insights_caches
elif module == InsightModules.QUEUE:
flag = Project.flags.has_insights_queues
elif module == InsightModules.LLM_MONITORING:
flag = Project.flags.has_insights_llm_monitoring

if flag is not None:
project.update(flags=F("flags").bitor(flag))

analytics.record(
"first_insight_span.sent",
user_id=get_owner_id(project),
Expand All @@ -358,9 +315,6 @@ def record_first_insight_span(project, module, **kwargs):
)


first_insight_span_received.connect(record_first_insight_span, weak=False)


# TODO (mifu67): update this to use the new org member invite model
@member_invited.connect(weak=False, dispatch_uid="onboarding.record_member_invited")
def record_member_invited(member, user, **kwargs):
Expand Down Expand Up @@ -426,28 +380,16 @@ def record_event_with_first_minified_stack_trace_for_project(project, event, **k
)
return

# First, only enter this logic if we've never seen a minified stack trace before
if not project.flags.has_minified_stack_trace:
# Next, attempt to update the flag, but ONLY if the flag is currently not set.
# The number of affected rows tells us whether we succeeded or not. If we didn't, then skip sending the event.
# This guarantees us that this analytics event will only be ever sent once.
affected = Project.objects.filter(
id=project.id, flags=F("flags").bitand(~Project.flags.has_minified_stack_trace)
).update(flags=F("flags").bitor(Project.flags.has_minified_stack_trace))

if (
project.date_added > START_DATE_TRACKING_FIRST_EVENT_WITH_MINIFIED_STACK_TRACE_PER_PROJ
and affected > 0
):
analytics.record(
"first_event_with_minified_stack_trace_for_project.sent",
user_id=owner_id,
organization_id=project.organization_id,
project_id=project.id,
platform=event.platform,
project_platform=project.platform,
url=dict(event.tags).get("url", None),
)
if project.date_added > START_DATE_TRACKING_FIRST_EVENT_WITH_MINIFIED_STACK_TRACE_PER_PROJ:
analytics.record(
"first_event_with_minified_stack_trace_for_project.sent",
user_id=owner_id,
organization_id=project.organization_id,
project_id=project.id,
platform=event.platform,
project_platform=project.platform,
url=dict(event.tags).get("url", None),
)


@event_processed.connect(weak=False, dispatch_uid="onboarding.record_sourcemaps_received")
Expand Down
4 changes: 2 additions & 2 deletions src/sentry/replays/usecases/ingest/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -40,6 +40,7 @@
from sentry.signals import first_replay_received
from sentry.utils import json, metrics
from sentry.utils.outcomes import Outcome, track_outcome
from sentry.utils.projectflags import set_project_flag_and_signal

CACHE_TIMEOUT = 3600
COMMIT_FREQUENCY_SEC = 1
Expand Down Expand Up @@ -120,8 +121,7 @@ def _track_initial_segment_event(
key_id: int | None,
received: int,
) -> None:
if not project.flags.has_replays:
first_replay_received.send_robust(project=project, sender=Project)
set_project_flag_and_signal(project, "has_replays", first_replay_received)

track_outcome(
org_id=org_id,
Expand Down
Loading
Loading