Skip to content

Commit 972d519

Browse files
authored
Merge branch 'master' into sy/bump-librdkafka
2 parents 36a2e4f + 415d227 commit 972d519

File tree

16 files changed

+148
-19
lines changed

16 files changed

+148
-19
lines changed

kubernetes_cluster_autoscaler/README.md

Lines changed: 56 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -19,6 +19,60 @@ No additional installation is needed on your server.
1919

2020
2. [Restart the Agent][5].
2121

22+
#### Metric collection
23+
24+
Make sure that the Prometheus-formatted metrics are exposed in your `kubernetes_cluster_autoscaler` cluster.
25+
For the Agent to start collecting metrics, the `kubernetes_cluster_autoscaler` pods need to be annotated.
26+
27+
[Kubernetes Cluster Autoscaler][11] has metrics and livenessProbe endpoints that can be accessed on port `8085`. These endpoints are located under `/metrics` and `/health-check` and provide valuable information about the state of your cluster during scaling operations.
28+
29+
**Note**: To change the default port, use the `--address` flag.
30+
31+
To configure the Cluster Autoscaler to expose metrics, do the following:
32+
33+
1. Enable access to the `/metrics` route and expose port `8085` for your Cluster Autoscaler deployment:
34+
35+
```
36+
ports:
37+
- name: app
38+
containerPort: 8085
39+
```
40+
41+
2. Instruct Prometheus to scrape it by adding the following annotation to your Cluster Autoscaler service:
42+
```
43+
prometheus.io/scrape: "true"
44+
```
45+
46+
**Note**: The listed metrics can only be collected if they are available. Some metrics are generated only when certain actions are performed.
47+
48+
The only parameter required for configuring the `kubernetes_cluster_autoscaler` check is `openmetrics_endpoint`. This parameter should be set to the location where the Prometheus-formatted metrics are exposed. The default port is `8085`. To configure a different port, use the `METRICS_PORT` [environment variable][10]. In containerized environments, `%%host%%` should be used for [host autodetection][3].
49+
50+
```yaml
51+
apiVersion: v1
52+
kind: Pod
53+
# (...)
54+
metadata:
55+
name: '<POD_NAME>'
56+
annotations:
57+
ad.datadoghq.com/controller.checks: |
58+
{
59+
"kubernetes_cluster_autoscaler": {
60+
"init_config": {},
61+
"instances": [
62+
{
63+
"openmetrics_endpoint": "http://%%host%%:8085/metrics"
64+
}
65+
]
66+
}
67+
}
68+
# (...)
69+
spec:
70+
containers:
71+
- name: 'controller'
72+
# (...)
73+
```
74+
75+
2276
### Validation
2377

2478
[Run the Agent's status subcommand][6] and look for `kubernetes_cluster_autoscaler` under the Checks section.
@@ -53,3 +107,5 @@ Need help? Contact [Datadog support][9].
53107
[7]: https://github.com/DataDog/integrations-core/blob/master/kubernetes_cluster_autoscaler/metadata.csv
54108
[8]: https://github.com/DataDog/integrations-core/blob/master/kubernetes_cluster_autoscaler/assets/service_checks.json
55109
[9]: https://docs.datadoghq.com/help/
110+
[10]: https://kubernetes.io/docs/tasks/inject-data-application/define-environment-variable-container/
111+
[11]: https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/FAQ.md#how-can-i-monitor-cluster-autoscaler

mongo/changelog.d/17730.added

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1 @@
1+
Include namespace in DBM samples operation_metadata

mongo/datadog_checks/mongo/dbm/operation_samples.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -300,6 +300,7 @@ def _get_operation_metadata(self, operation: dict) -> OperationSampleOperationMe
300300
"truncated": self._get_command_truncation_state(command),
301301
"client": self._get_operation_client(operation),
302302
"user": self._get_operation_user(operation),
303+
"ns": namespace,
303304
}
304305

305306
def _get_operation_stats(self, operation: dict) -> OperationSampleOperationStats:
@@ -361,6 +362,7 @@ def _create_operation_sample_payload(
361362
"shard": operation_metadata["shard"],
362363
"collection": operation_metadata["collection"],
363364
"comment": operation_metadata["comment"],
365+
"ns": operation_metadata["ns"],
364366
},
365367
"query_truncated": operation_metadata["truncated"],
366368
},

mongo/datadog_checks/mongo/dbm/types.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -148,6 +148,7 @@ class OperationSampleOperationMetadata(TypedDict, total=False):
148148
truncated: Optional[str]
149149
client: OperationSampleClient
150150
user: Optional[str]
151+
ns: Optional[str]
151152

152153

153154
class OperationSampleActivityRecord(

mongo/tests/results/operation-samples-mongos.json

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -704,7 +704,8 @@
704704
"op": "query",
705705
"shard": "shard04",
706706
"collection": "users",
707-
"comment": "sort"
707+
"comment": "sort",
708+
"ns": "integration.$cmd"
708709
},
709710
"query_truncated": "not_truncated"
710711
},

mongo/tests/results/operation-samples-standalone.json

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -125,7 +125,8 @@
125125
"op": "query",
126126
"shard": null,
127127
"collection": "products",
128-
"comment": "find"
128+
"comment": "find",
129+
"ns": "integration.products"
129130
},
130131
"query_truncated": "not_truncated"
131132
},

requirements-agent-release.txt

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -149,7 +149,7 @@ datadog-scylla==2.7.1
149149
datadog-sidekiq==1.4.0
150150
datadog-silk==2.2.1
151151
datadog-singlestore==2.2.0
152-
datadog-snmp==7.3.0
152+
datadog-snmp==7.3.1
153153
datadog-snowflake==5.6.0
154154
datadog-solr==1.13.0
155155
datadog-sonarqube==3.2.2

snmp/CHANGELOG.md

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,12 @@
22

33
<!-- towncrier release notes start -->
44

5+
## 7.3.1 / 2024-06-05
6+
7+
***Fixed***:
8+
9+
* Use a forced metric type of gauge for ccmRejectedPhones and ccmUnregisteredPhones so they are not incorrectly inferred to be rate types. ([#17722](https://github.com/DataDog/integrations-core/pull/17722))
10+
511
## 7.3.0 / 2024-04-26 / Agent 7.54.0
612

713
***Added***:

snmp/datadog_checks/snmp/__about__.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2,4 +2,4 @@
22
# All rights reserved
33
# Licensed under a 3-clause BSD style license (see LICENSE)
44

5-
__version__ = '7.3.0'
5+
__version__ = '7.3.1'

snmp/datadog_checks/snmp/data/default_profiles/_cisco-voice.yaml

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -74,10 +74,12 @@ metrics:
7474
name: ccmRegisteredPhones
7575
OID: 1.3.6.1.4.1.9.9.156.1.5.5.0
7676
- MIB: CISCO-CCM-MIB
77+
metric_type: gauge
7778
symbol:
7879
name: ccmRejectedPhones
7980
OID: 1.3.6.1.4.1.9.9.156.1.5.7.0
8081
- MIB: CISCO-CCM-MIB
82+
metric_type: gauge
8183
symbol:
8284
name: ccmUnregisteredPhones
8385
OID: 1.3.6.1.4.1.9.9.156.1.5.6.0

snmp/tests/test_profiles.py

Lines changed: 1 addition & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -182,12 +182,7 @@ def test_cisco_voice(aggregator):
182182
for cvp in cvp_gauges:
183183
aggregator.assert_metric('snmp.{}'.format(cvp), metric_type=aggregator.GAUGE, tags=tags)
184184

185-
ccms_counts = ["ccmRejectedPhones", "ccmUnregisteredPhones"]
186-
187-
ccms_gauges = ["ccmRegisteredGateways", "ccmRegisteredPhones"]
188-
189-
for ccm in ccms_counts:
190-
aggregator.assert_metric('snmp.{}'.format(ccm), metric_type=aggregator.RATE, tags=tags)
185+
ccms_gauges = ["ccmRegisteredGateways", "ccmRegisteredPhones", "ccmRejectedPhones", "ccmUnregisteredPhones"]
191186

192187
for ccm in ccms_gauges:
193188
aggregator.assert_metric('snmp.{}'.format(ccm), metric_type=aggregator.GAUGE, tags=tags)

sqlserver/changelog.d/17750.fixed

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1 @@
1+
[sqlserver] fix missing sqlserver_version

sqlserver/datadog_checks/sqlserver/sqlserver.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -234,7 +234,6 @@ def set_resource_tags(self):
234234
)
235235

236236
def set_resolved_hostname(self):
237-
# load static information cache
238237
self.load_static_information()
239238
if self._resolved_hostname is None:
240239
if self._config.reported_hostname:
@@ -735,6 +734,7 @@ def _check_database_conns(self):
735734

736735
def check(self, _):
737736
if self.do_check:
737+
self.load_static_information()
738738
# configure custom queries for the check
739739
if self._query_manager is None:
740740
# use QueryManager to process custom queries

traefik_mesh/README.md

Lines changed: 67 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -25,7 +25,7 @@ Traefik Mesh can be configured to expose Prometheus-formatted metrics. The Datad
2525

2626
In addition, a small subset of metrics can be collected by communicating with different API endpoints. Specifically:
2727
- `/api/version`: Version information on the Traefik proxy.
28-
- `/api/status/nodes`: Ready status of nodes visible by the Traefik [controller][12].
28+
- `/api/status/nodes`: Ready status of nodes visible by the Traefik [controller][5].
2929
- `/api/status/readiness`: Ready status of the Traefik controller.
3030

3131
**Note**: This check uses [OpenMetrics][11] for metric collection, which requires Python 3.
@@ -40,7 +40,70 @@ Make sure that the Prometheus-formatted metrics are exposed in your Traefik Mesh
4040
When configuring the Traefik Mesh check, you can use the following parameters:
4141
- `openmetrics_endpoint`: This parameter should be set to the location where the Prometheus-formatted metrics are exposed. The default port is `8082`, but it can be configured using the `--entryPoints.metrics.address`. In containerized environments, `%%host%%` can be used for [host autodetection][3].
4242
- `traefik_proxy_api_endpoint`: This parameter is optional. The default port is `8080` and can be configured using `--entryPoints.traefik.address`. In containerized environments, `%%host%%` can be used for [host autodetection][3].
43-
- `traefik_controller_api_endpoint`: This parameter is optional. The default port is set to `9000`.
43+
- `traefik_controller_api_endpoint`: This parameter is optional. The default port is set to `9000`.
44+
45+
#### Traefik Proxy
46+
```yaml
47+
# (...)
48+
metadata:
49+
name: '<POD_NAME>'
50+
annotations:
51+
ad.datadoghq.com/<CONTAINER_NAME>.checks: |
52+
{
53+
"traefik_mesh": {
54+
"init_config": {},
55+
"instances": [
56+
{
57+
"openmetrics_endpoint": "http://%%host%%:8082/metrics",
58+
"traefik_proxy_api_endpoint": "http://%%host%%:8080"
59+
}
60+
]
61+
}
62+
}
63+
# (...)
64+
spec:
65+
containers:
66+
- name: <CONTAINER_NAME>
67+
# (...)
68+
```
69+
70+
#### Traefik Controller
71+
```yaml
72+
# (...)
73+
metadata:
74+
name: '<POD_NAME>'
75+
annotations:
76+
ad.datadoghq.com/<CONTAINER_NAME>.checks: |
77+
{
78+
"traefik_mesh": {
79+
"init_config": {},
80+
"instances": [
81+
{
82+
"traefik_controller_api_endpoint": "http://%%host%%:9000"
83+
}
84+
]
85+
}
86+
}
87+
# (...)
88+
spec:
89+
containers:
90+
- name: <CONTAINER_NAME>
91+
# (...)
92+
```
93+
94+
See the [sample traefik_mesh.d/conf.yaml][4] for all available configuration options.
95+
96+
### Log collection
97+
98+
_Available for Agent versions >6.0_
99+
100+
Traefik Mesh logs can be collected from the different Traefik Mesh pods through Kubernetes. Collecting logs is disabled by default in the Datadog Agent. To enable it, see [Kubernetes Log Collection][12].
101+
102+
See the [Autodiscovery Integration Templates][3] for guidance on applying the parameters below.
103+
104+
| Parameter | Value |
105+
| -------------- | ---------------------------------------------------- |
106+
| `<LOG_CONFIG>` | `{"source": "traefik_mesh", "service": "<SERVICE_NAME>"}` |
44107

45108
### Validation
46109

@@ -69,11 +132,11 @@ Need help? Contact [Datadog support][9].
69132
[2]: https://app.datadoghq.com/account/settings/agent/latest
70133
[3]: https://docs.datadoghq.com/agent/kubernetes/integrations/
71134
[4]: https://github.com/DataDog/integrations-core/blob/master/traefik_mesh/datadog_checks/traefik_mesh/data/conf.yaml.example
72-
[5]: https://docs.datadoghq.com/agent/guide/agent-commands/#start-stop-and-restart-the-agent
135+
[5]: https://doc.traefik.io/traefik-mesh/api/
73136
[6]: https://docs.datadoghq.com/agent/guide/agent-commands/#agent-status-and-information
74137
[7]: https://github.com/DataDog/integrations-core/blob/master/traefik_mesh/metadata.csv
75138
[8]: https://github.com/DataDog/integrations-core/blob/master/traefik_mesh/assets/service_checks.json
76139
[9]: https://docs.datadoghq.com/help/
77140
[10]: https://doc.traefik.io/traefik/observability/metrics/overview/
78141
[11]: https://docs.datadoghq.com/integrations/openmetrics/
79-
[12]: https://doc.traefik.io/traefik-mesh/api/
142+
[12]: https://docs.datadoghq.com/containers/kubernetes/log/
Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1 @@
1+
Using constant values instead of reassigning variables in each check run

windows_service/datadog_checks/windows_service/windows_service.py

Lines changed: 3 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -115,6 +115,7 @@ class ServiceView(object):
115115
}
116116
STARTUP_TYPE_DELAYED_AUTO = "automatic_delayed_start"
117117
STARTUP_TYPE_UNKNOWN = "unknown"
118+
DISPLAY_NAME_UNKNOWN = "Not_Found"
118119

119120
def __init__(self, scm_handle, name):
120121
self.scm_handle = scm_handle
@@ -308,18 +309,16 @@ def check(self, instance):
308309
for service in services_unseen:
309310
# if a name doesn't match anything (wrong name or no permission to access the service), report UNKNOWN
310311
status = self.UNKNOWN
311-
startup_type_string = ServiceView.STARTUP_TYPE_UNKNOWN
312-
display_name = "Not_Found"
313312

314313
tags = ['windows_service:{}'.format(service)]
315314

316315
tags.extend(custom_tags)
317316

318317
if instance.get('windows_service_startup_type_tag', False):
319-
tags.append('windows_service_startup_type:{}'.format(startup_type_string))
318+
tags.append('windows_service_startup_type:{}'.format(ServiceView.STARTUP_TYPE_UNKNOWN))
320319

321320
if instance.get('collect_display_name_as_tag', False):
322-
tags.append('display_name:{}'.format(display_name))
321+
tags.append('display_name:{}'.format(ServiceView.DISPLAY_NAME_UNKNOWN))
323322

324323
if not instance.get('disable_legacy_service_tag', False):
325324
self._log_deprecation('service_tag', 'windows_service')

0 commit comments

Comments
 (0)