Skip to content

Commit c940966

Browse files
committed
add datadog provider
refactor base and exceptions to resolve a circular import issue; add datadog provider tests
1 parent cdf9084 commit c940966

File tree

9 files changed

+385
-14
lines changed

9 files changed

+385
-14
lines changed
Lines changed: 1 addition & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -1,16 +1,4 @@
# Re-export metric exceptions from their provider homes so existing callers
# keep importing them from this module (avoids a circular import).
from aws_lambda_powertools.metrics.provider.base.exceptions import MetricValueError, SchemaValidationError
from aws_lambda_powertools.metrics.provider.cloudwatch_emf.exceptions import MetricResolutionError, MetricUnitError

__all__ = ["MetricUnitError", "MetricResolutionError", "SchemaValidationError", "MetricValueError"]
Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,9 @@
# Public surface of the metrics provider package.
from aws_lambda_powertools.metrics.provider.base import MetricsBase, MetricsProviderBase
from aws_lambda_powertools.metrics.provider.datadog import DatadogMetrics, DatadogProvider

__all__ = [
    "MetricsBase",
    "MetricsProviderBase",
    "DatadogMetrics",
    "DatadogProvider",
]
Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,11 @@
# Public surface of the base provider package.
from aws_lambda_powertools.metrics.provider.base.base import (
    MetricsBase,
    MetricsProviderBase,
    reset_cold_start_flag_provider,
)

__all__ = [
    "MetricsBase",
    "MetricsProviderBase",
    "reset_cold_start_flag_provider",
]
Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,10 @@
class SchemaValidationError(Exception):
    """Raised when serialization fails schema validation."""


class MetricValueError(Exception):
    """Raised when a metric value is not a valid number."""

aws_lambda_powertools/metrics/provider/cloudwatch_emf/cloudwatch.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -11,8 +11,8 @@
1111
from typing import Any, Callable, Dict, List, Optional, Union
1212

1313
from aws_lambda_powertools.metrics.base import single_metric
14-
from aws_lambda_powertools.metrics.exceptions import MetricValueError, SchemaValidationError
1514
from aws_lambda_powertools.metrics.provider import MetricsProviderBase
15+
from aws_lambda_powertools.metrics.provider.base.exceptions import MetricValueError, SchemaValidationError
1616
from aws_lambda_powertools.metrics.provider.cloudwatch_emf import cold_start
1717
from aws_lambda_powertools.metrics.provider.cloudwatch_emf.constants import MAX_DIMENSIONS, MAX_METRICS
1818
from aws_lambda_powertools.metrics.provider.cloudwatch_emf.exceptions import (
Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,6 @@
# Public surface of the Datadog provider package.
from aws_lambda_powertools.metrics.provider.datadog.datadog import DatadogMetrics, DatadogProvider

__all__ = [
    "DatadogMetrics",
    "DatadogProvider",
]
Lines changed: 261 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,261 @@
from __future__ import annotations

import json
import logging
import numbers
import os
import time
import warnings
from typing import Any, List, Optional

from aws_lambda_powertools.metrics.provider import MetricsBase
from aws_lambda_powertools.metrics.provider.base.exceptions import MetricValueError, SchemaValidationError

logger = logging.getLogger(__name__)

# Detect whether the Datadog Lambda layer/package is installed; when it is
# missing we fall back to flushing metrics to the log (see DatadogProvider.flush).
try:
    from datadog_lambda.metric import lambda_metric  # type: ignore
except ImportError:
    lambda_metric = None

DEFAULT_NAMESPACE = "default"
23+
24+
25+
class DatadogProvider:
    """
    Datadog metrics provider. This class should only be used inside DatadogMetrics.

    Each metric is stored internally as::

        {
            "m": metric_name,
            "v": value,
            "e": timestamp,
            "t": ["tag:value", "tag2:value2"],
        }

    which is the format expected by the Datadog log forwarder, see
    https://github.com/Datadog/datadog-lambda-python/blob/main/datadog_lambda/metric.py#L77

    Examples
    --------

    """

    def __init__(self, namespace: str = DEFAULT_NAMESPACE, flush_to_log: bool = False):
        """
        Parameters
        ----------
        namespace: str
            For datadog, namespace will be prepended to each exported metric name
            (namespace.metric_name).
        flush_to_log: bool
            Flush datadog metrics to log (collect with log forwarder) rather than
            using the datadog extension. The DD_FLUSH_TO_LOG environment variable
            (value "true", case-insensitive) also enables this.
        """
        self.metrics: List = []
        self.namespace: str = namespace
        # flush to log if either the env var or the constructor flag asks for it
        self.flush_to_log = (os.environ.get("DD_FLUSH_TO_LOG", "").lower() == "true") or flush_to_log
        super().__init__()

    def add_metric(
        self,
        name: str,
        value: float,
        timestamp: Optional[int] = None,
        tags: Optional[List] = None,
        **kwargs: Any,
    ) -> None:
        """
        Store a single metric to be flushed later; used by the metrics class.

        Parameters
        ----------
        name: str
            Name/Key for the metric
        value: float
            Value for the metric; must be a real number
        timestamp: int
            Epoch timestamp for the metric, default = time.time()
        tags: List[str]
            In format like List["tag:value","tag2:value2"]
        kwargs: Any
            extra kwargs will be converted into tags, e.g., add_metric(sales=sam) -> tags=['sales:sam']

        Raises
        ------
        MetricValueError
            When value is not a valid number

        Examples
        --------
        >>> provider = DatadogProvider()
        >>>
        >>> provider.add_metric(
        >>>     name='coffee_house.order_value',
        >>>     value=12.45,
        >>>     tags=['product:latte', 'order:online'],
        >>>     sales='sam'
        >>> )
        """
        if not isinstance(value, numbers.Real):
            raise MetricValueError(f"{value} is not a valid number")
        # copy the caller's list so appending kwargs-derived tags below
        # does not mutate the argument the caller still holds
        tags = list(tags) if tags else []
        # `is None` (not truthiness) so an explicit epoch timestamp of 0 is honored
        if timestamp is None:
            timestamp = int(time.time())
        for tag_key, tag_value in kwargs.items():
            tags.append(f"{tag_key}:{tag_value}")
        self.metrics.append({"m": name, "v": value, "e": timestamp, "t": tags})

    def serialize(self) -> List:
        """Return the stored metrics with the namespace applied to each name."""
        output_list: List = []

        for metric_item in self.metrics:
            # the default namespace is not prepended to keep names unprefixed
            if self.namespace != DEFAULT_NAMESPACE:
                metric_name = f"{self.namespace}.{metric_item['m']}"
            else:
                metric_name = metric_item["m"]
            output_list.append(
                {
                    "m": metric_name,
                    "v": metric_item["v"],
                    "e": metric_item["e"],
                    "t": metric_item["t"],
                },
            )

        return output_list

    # flush serialized data to output
    def flush(self, metrics: List):
        """
        Submit serialized metrics, either via the Datadog extension or the log.

        Parameters
        ----------
        metrics: List[Dict]
            [{
                "m": metric_name,
                "v": value,
                "e": timestamp
                "t": List["tag:value","tag2:value2"]
            }]

        Raises
        -------
        SchemaValidationError
            When there is no metric to flush
        """
        if len(metrics) == 0:
            raise SchemaValidationError("Must contain at least one metric.")
        # submit through datadog extension
        if lambda_metric and self.flush_to_log is False:
            # use lambda_metric function from datadog package, submit metrics to datadog
            for metric_item in metrics:
                lambda_metric(
                    metric_name=metric_item["m"],
                    value=metric_item["v"],
                    timestamp=metric_item["e"],
                    tags=metric_item["t"],
                )
        else:
            # dd module not found: flush to log, this format can be recognized via datadog log forwarder
            # https://github.com/Datadog/datadog-lambda-python/blob/main/datadog_lambda/metric.py#L77
            for metric_item in metrics:
                print(json.dumps(metric_item, separators=(",", ":")))

    def clear_metrics(self):
        """Drop all stored metrics."""
        self.metrics = []
166+
167+
class DatadogMetrics(MetricsBase):
    """
    Datadog metrics class, backed by a DatadogProvider.

    Parameters
    ----------
    provider: DatadogProvider
        The datadog provider which will be used to process metrics data

    Example
    -------
    **Creates a few metrics and publish at the end of a function execution**

    >>> from aws_lambda_powertools.metrics.provider import DatadogMetrics, DatadogProvider
    >>>
    >>> dd_provider = DatadogProvider(namespace="Serverlesspresso")
    >>> metrics = DatadogMetrics(provider=dd_provider)
    >>>
    >>> @metrics.log_metrics(capture_cold_start_metric=True, raise_on_empty_metrics=False)
    >>> def lambda_handler(event, context):
    >>>     metrics.add_metric(name="item_sold",value=1,tags=['product:latte', 'order:online'])
    """

    # `log_metrics` and `_add_cold_start_metric` come straight from `MetricsBase`
    def __init__(self, provider: DatadogProvider):
        self.provider = provider
        super().__init__()

    # extra positional args are accepted and dropped to keep the same experience
    def add_metric(
        self,
        name: str,
        value: float,
        timestamp: Optional[int] = None,
        tags: Optional[List] = None,
        *args,
        **kwargs,
    ):
        """
        Forward a single metric to the underlying Datadog provider.

        Parameters
        ----------
        name: str
            Name/Key for the metric
        value: float
            Value for the metric
        timestamp: int
            Epoch timestamp for the metric, default = time.time()
        tags: List[str]
            In format like List["tag:value","tag2:value2"]
        args: Any
            extra args will be dropped
        kwargs: Any
            extra kwargs will be converted into tags, e.g., add_metric(sales=sam) -> tags=['sales:sam']

        Examples
        --------
        >>> from aws_lambda_powertools.metrics.provider import DatadogMetrics, DatadogProvider
        >>>
        >>> metrics = DatadogMetrics(provider=DatadogProvider())
        >>> metrics.add_metric(
        >>>     name='coffee_house.order_value',
        >>>     value=12.45,
        >>>     tags=['product:latte', 'order:online']
        >>> )
        """
        self.provider.add_metric(name=name, value=value, timestamp=timestamp, tags=tags, **kwargs)

    def flush_metrics(self, raise_on_empty_metrics: bool = False) -> None:
        """
        Manually flushes the metrics. This is normally not necessary,
        unless you're running on other runtimes besides Lambda, where the @log_metrics
        decorator already handles things for you.

        Parameters
        ----------
        raise_on_empty_metrics: bool
            raise exception if no metrics are emitted, by default False
        """
        serialized = self.provider.serialize()
        if serialized or raise_on_empty_metrics:
            # the provider raises SchemaValidationError when the list is empty
            self.provider.flush(serialized)
            self.provider.clear_metrics()
        else:
            warnings.warn(
                "No application metrics to publish. The cold-start metric may be published if enabled. "
                "If application metrics should never be empty, consider using 'raise_on_empty_metrics'",
                stacklevel=2,
            )

    def add_cold_start_metric(self, metric_name: str, function_name: str) -> None:
        """Record the ColdStart metric, tagged with the Lambda function name."""
        logger.debug("Adding cold start metric and function_name tagging")
        self.add_metric(name="ColdStart", value=1, function_name=function_name)

0 commit comments

Comments
 (0)