Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

chore(parametric): validate telemetry schema #4076

Draft
wants to merge 1 commit into
base: main
Choose a base branch
from
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
101 changes: 83 additions & 18 deletions tests/parametric/test_telemetry.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@

import pytest

from utils.telemetry_utils import TelemetryUtils
from utils.telemetry_utils import TelemetryUtils, TelemetryV2Validator
from utils import context, scenarios, rfc, features, missing_feature


Expand All @@ -29,13 +29,21 @@
"python": "DD_TRACE_HEADER_TAGS",
},
"trace_tags": {"dotnet": "DD_TAGS", "nodejs": "DD_TAGS", "python": "DD_TAGS"},
"trace_enabled": {"dotnet": "DD_TRACE_ENABLED", "nodejs": "tracing", "python": "DD_TRACE_ENABLED"},
"trace_enabled": {
"dotnet": "DD_TRACE_ENABLED",
"nodejs": "tracing",
"python": "DD_TRACE_ENABLED",
},
"profiling_enabled": {
"dotnet": "DD_PROFILING_ENABLED",
"nodejs": "profiling.enabled",
"python": "DD_PROFILING_ENABLED",
},
"appsec_enabled": {"dotnet": "DD_APPSEC_ENABLED", "nodejs": "appsec.enabled", "python": "DD_APPSEC_ENABLED"},
"appsec_enabled": {
"dotnet": "DD_APPSEC_ENABLED",
"nodejs": "appsec.enabled",
"python": "DD_APPSEC_ENABLED",
},
"data_streams_enabled": {
"dotnet": "DD_DATA_STREAMS_ENABLED",
"nodejs": "dsmEnabled",
Expand All @@ -52,6 +60,9 @@ def _mapped_telemetry_name(context, apm_telemetry_name):
return apm_telemetry_name


# Shared module-level validator: loading and compiling the telemetry JSON
# schemas is done once at import time instead of once per test.
validator = TelemetryV2Validator()


@scenarios.parametric
@rfc("https://docs.google.com/document/d/1In4TfVBbKEztLzYg4g0si5H56uzAbYB3OfqzRGP2xhg/edit")
@features.telemetry_app_started_event
Expand All @@ -67,11 +78,15 @@ class Test_Defaults:
}
],
)
@missing_feature(context.library <= "python@2.16.0", reason="Reports configurations with unexpected names")
@missing_feature(
context.library <= "python@2.16.0",
reason="Reports configurations with unexpected names",
)
def test_library_settings(self, library_env, test_agent, test_library):
with test_library.dd_start_span("test"):
pass
event = test_agent.wait_for_telemetry_event("app-started", wait_loops=400)
assert not validator.get_errors(event)
configuration = event["payload"]["configuration"]

configuration_by_name = {item["name"]: item for item in configuration}
Expand Down Expand Up @@ -100,6 +115,14 @@ def test_library_settings(self, library_env, test_agent, test_library):
if context.library == "python" and apm_telemetry_name in ("trace_sample_rate",):
# DD_TRACE_SAMPLE_RATE is not supported in ddtrace>=3.x
continue
if context.library == "nodejs" and apm_telemetry_name in (
"trace_sample_rate",
"profiling_enabled",
"appsec_enabled",
"data_streams_enabled",
):
# NodeJS does not report default values for these configurations when they are not explicitly set.
continue
apm_telemetry_name = _mapped_telemetry_name(context, apm_telemetry_name)

cfg_item = configuration_by_name.get(apm_telemetry_name)
Expand Down Expand Up @@ -141,11 +164,15 @@ class Test_Consistent_Configs:
}
],
)
@missing_feature(context.library <= "python@2.16.0", reason="Reports configurations with unexpected names")
@missing_feature(
context.library <= "python@2.16.0",
reason="Reports configurations with unexpected names",
)
def test_library_settings(self, library_env, test_agent, test_library):
with test_library.dd_start_span("test"):
pass
event = test_agent.wait_for_telemetry_event("app-started", wait_loops=400)
assert not validator.get_errors(event)
configuration = event["payload"]["configuration"]
configuration_by_name = {item["name"]: item for item in configuration}

Expand Down Expand Up @@ -178,11 +205,15 @@ def test_library_settings(self, library_env, test_agent, test_library):
],
)
@missing_feature(context.library == "nodejs", reason="Not implemented")
@missing_feature(context.library <= "python@2.16.0", reason="Reports configurations with unexpected names")
@missing_feature(
context.library <= "python@2.16.0",
reason="Reports configurations with unexpected names",
)
def test_library_settings_2(self, library_env, test_agent, test_library):
with test_library.dd_start_span("test"):
pass
event = test_agent.wait_for_telemetry_event("app-started", wait_loops=400)
assert not validator.get_errors(event)
configuration = event["payload"]["configuration"]
configuration_by_name = {item["name"]: item for item in configuration}

Expand Down Expand Up @@ -219,11 +250,15 @@ class Test_Environment:
}
],
)
@missing_feature(context.library <= "python@2.16.0", reason="Reports configurations with unexpected names")
@missing_feature(
context.library <= "python@2.16.0",
reason="Reports configurations with unexpected names",
)
def test_library_settings(self, library_env, test_agent, test_library):
with test_library.dd_start_span("test"):
pass
event = test_agent.wait_for_telemetry_event("app-started", wait_loops=400)
assert not validator.get_errors(event)
configuration = event["payload"]["configuration"]

configuration_by_name = {item["name"]: item for item in configuration}
Expand Down Expand Up @@ -273,7 +308,8 @@ def test_library_settings(self, library_env, test_agent, test_library):
@missing_feature(context.library == "php", reason="Not implemented")
@missing_feature(context.library == "cpp", reason="Not implemented")
@missing_feature(
context.library <= "python@3.1.0", reason="OTEL Sampling config is mapped to a different datadog config"
context.library <= "python@3.1.0",
reason="OTEL Sampling config is mapped to a different datadog config",
)
@pytest.mark.parametrize(
"library_env",
Expand Down Expand Up @@ -310,6 +346,7 @@ def test_telemetry_otel_env_hiding(self, library_env, test_agent, test_library):
with test_library.dd_start_span("test"):
pass
event = test_agent.wait_for_telemetry_event("generate-metrics", wait_loops=400)
assert not validator.get_errors(event)
payload = event["payload"]
assert event["request_type"] == "generate-metrics"

Expand Down Expand Up @@ -366,10 +403,12 @@ def test_telemetry_otel_env_hiding(self, library_env, test_agent, test_library):
@missing_feature(context.library == "php", reason="Not implemented")
@missing_feature(context.library == "cpp", reason="Not implemented")
@missing_feature(
context.library <= "python@3.1.0", reason="OTEL Sampling config is mapped to a different datadog config"
context.library <= "python@3.1.0",
reason="OTEL Sampling config is mapped to a different datadog config",
)
@missing_feature(
context.library == "nodejs", reason="does not collect otel_env.invalid metrics for otel_resource_attributes"
context.library == "nodejs",
reason="does not collect otel_env.invalid metrics for otel_resource_attributes",
)
@pytest.mark.parametrize(
"library_env",
Expand Down Expand Up @@ -397,6 +436,7 @@ def test_telemetry_otel_env_invalid(self, library_env, test_agent, test_library)
with test_library.dd_start_span("test"):
pass
event = test_agent.wait_for_telemetry_event("generate-metrics", wait_loops=400)
assert not validator.get_errors(event)
payload = event["payload"]
assert event["request_type"] == "generate-metrics"

Expand Down Expand Up @@ -483,7 +523,8 @@ def test_telemetry_event_propagated(self, library_env, test_agent, test_library)
with test_library.dd_start_span("first_span"):
pass

test_agent.wait_for_telemetry_event("app-started", wait_loops=400)
event = test_agent.wait_for_telemetry_event("app-started", wait_loops=400)
assert not validator.get_errors(event)
requests = test_agent.raw_telemetry(clear=True)
assert len(requests) > 0, "There should be at least one telemetry event (app-started)"
for req in requests:
Expand Down Expand Up @@ -526,7 +567,9 @@ def test_telemetry_event_not_propagated(self, library_env, test_agent, test_libr
with test_library.dd_start_span("first_span"):
pass

test_agent.wait_for_telemetry_event("app-started")
event = test_agent.wait_for_telemetry_event("app-started")
assert not validator.get_errors(event)

requests = test_agent.raw_telemetry(clear=True)
assert len(requests) > 0, "There should be at least one telemetry event (app-started)"
for req in requests:
Expand Down Expand Up @@ -563,7 +606,8 @@ def get_app_started_configuration_by_name(test_agent, test_library):
with test_library.dd_start_span("first_span"):
pass

test_agent.wait_for_telemetry_event("app-started", wait_loops=400)
event = test_agent.wait_for_telemetry_event("app-started", wait_loops=400)
assert not validator.get_errors(event)

requests = test_agent.raw_telemetry(clear=True)
bodies = list(Test_TelemetrySCAEnvVar.flatten_message_batch(requests))
Expand All @@ -587,16 +631,37 @@ def get_app_started_configuration_by_name(test_agent, test_library):
"library_env, specific_libraries_support, outcome_value",
[
({**DEFAULT_ENVVARS, "DD_APPSEC_SCA_ENABLED": "true"}, False, True),
({**DEFAULT_ENVVARS, "DD_APPSEC_SCA_ENABLED": "True"}, ("python", "golang"), True),
({**DEFAULT_ENVVARS, "DD_APPSEC_SCA_ENABLED": "1"}, ("python", "golang"), True),
(
{**DEFAULT_ENVVARS, "DD_APPSEC_SCA_ENABLED": "True"},
("python", "golang"),
True,
),
(
{**DEFAULT_ENVVARS, "DD_APPSEC_SCA_ENABLED": "1"},
("python", "golang"),
True,
),
({**DEFAULT_ENVVARS, "DD_APPSEC_SCA_ENABLED": "false"}, False, False),
({**DEFAULT_ENVVARS, "DD_APPSEC_SCA_ENABLED": "False"}, ("python", "golang"), False),
({**DEFAULT_ENVVARS, "DD_APPSEC_SCA_ENABLED": "0"}, ("python", "golang"), False),
(
{**DEFAULT_ENVVARS, "DD_APPSEC_SCA_ENABLED": "False"},
("python", "golang"),
False,
),
(
{**DEFAULT_ENVVARS, "DD_APPSEC_SCA_ENABLED": "0"},
("python", "golang"),
False,
),
],
)
@missing_feature(context.library <= "python@2.16.0", reason="Converts boolean values to strings")
def test_telemetry_sca_enabled_propagated(
self, library_env, specific_libraries_support, outcome_value, test_agent, test_library
self,
library_env,
specific_libraries_support,
outcome_value,
test_agent,
test_library,
):
if specific_libraries_support and context.library not in specific_libraries_support:
pytest.xfail(f"{outcome_value} unsupported value for {context.library}")
Expand Down
44 changes: 44 additions & 0 deletions utils/telemetry_utils.py
Original file line number Diff line number Diff line change
@@ -1,3 +1,47 @@
import glob
import json
import os
from pathlib import Path
from jsonschema.validators import Draft7Validator, RefResolver
import typing

# Repository root: this file lives at <root>/utils/telemetry_utils.py,
# so two .parent hops reach the project root.
ROOT_PROJECT = Path(__file__).parent.parent


class TelemetryV2Validator:
    """Validates parsed telemetry intake payloads against the v2 JSON schemas.

    On construction, every ``*.json`` schema under
    ``utils/interfaces/schemas/miscs/telemetry`` is loaded once and indexed by
    its ``$id`` in a shared store so cross-file ``$ref``s resolve, then a
    Draft-7 validator is compiled rooted at ``v2/telemetry_request.json``.
    """

    def __init__(self) -> None:
        schema_store: dict[str, typing.Any] = {}

        # Load every schema file and register it under its "$id" so that
        # $refs between schema files resolve through the shared store.
        schema_dir = ROOT_PROJECT / "utils" / "interfaces" / "schemas" / "miscs" / "telemetry"
        for schema_path in schema_dir.glob("**/*.json"):
            with open(schema_path, "r", encoding="utf-8") as f:
                schema = json.load(f)
            schema_store[schema["$id"]] = schema

        # The top-level schema describing a whole telemetry request body.
        with open(schema_dir / "v2" / "telemetry_request.json", "r", encoding="utf-8") as f:
            main_schema = json.load(f)

        # NOTE(review): RefResolver and the `resolver=` argument are deprecated
        # in jsonschema >= 4.18 in favor of referencing.Registry — consider
        # migrating when the pinned jsonschema version allows it.
        self.resolver = RefResolver.from_schema(main_schema, store=schema_store)
        self.validator = Draft7Validator(
            schema=main_schema,
            resolver=self.resolver,
        )

    def validate(self, data: typing.Any) -> bool:
        """Return True when *data* (a parsed telemetry request body) matches the schema.

        Best-effort boolean gate: any validation or $ref-resolution error
        yields False rather than raising.
        """
        try:
            self.validator.validate(data)
        except Exception:
            return False
        return True

    def get_errors(self, data: typing.Any) -> list[dict[str, typing.Any]]:
        """Return one ``{reason, location, json}`` dict per schema violation.

        Errors are ordered by their JSON path. The string ``json_path`` is used
        as the sort key because sorting by ``e.path`` (a deque mixing str keys
        and int indices) can raise TypeError on heterogeneous comparisons.
        """
        return [
            {"reason": e.message, "location": e.json_path, "json": e.instance}
            for e in sorted(self.validator.iter_errors(data), key=lambda e: e.json_path)
        ]


class TelemetryUtils:
test_loaded_dependencies = {
"dotnet": {"NodaTime": False},
Expand Down
Loading