From b9f8ceae69c6a29c102060c7d530af68adf95d96 Mon Sep 17 00:00:00 2001 From: Roger Zhang Date: Tue, 30 May 2023 14:09:17 -0700 Subject: [PATCH 01/32] Use a different Metric class for different provider --- .../metrics/provider/__init__.py | 12 ++ .../metrics/provider/base.py | 126 ++++++++++++++++++ .../provider/datadog_provider_draft.py | 96 +++++++++++++ 3 files changed, 234 insertions(+) create mode 100644 aws_lambda_powertools/metrics/provider/__init__.py create mode 100644 aws_lambda_powertools/metrics/provider/base.py create mode 100644 aws_lambda_powertools/metrics/provider/datadog_provider_draft.py diff --git a/aws_lambda_powertools/metrics/provider/__init__.py b/aws_lambda_powertools/metrics/provider/__init__.py new file mode 100644 index 00000000000..93ce6df91fe --- /dev/null +++ b/aws_lambda_powertools/metrics/provider/__init__.py @@ -0,0 +1,12 @@ +from aws_lambda_powertools.metrics.provider.base import MetricsBase, MetricsProviderBase +from aws_lambda_powertools.metrics.provider.datadog_provider_draft import ( + DataDogMetrics, + DataDogProvider, +) + +__all__ = [ + "MetricsBase", + "MetricsProviderBase", + "DataDogMetrics", + "DataDogProvider", +] diff --git a/aws_lambda_powertools/metrics/provider/base.py b/aws_lambda_powertools/metrics/provider/base.py new file mode 100644 index 00000000000..9c91a238510 --- /dev/null +++ b/aws_lambda_powertools/metrics/provider/base.py @@ -0,0 +1,126 @@ +import functools +import logging +from abc import ABC, abstractmethod +from typing import Any, Callable, Dict, Optional, Union + +logger = logging.getLogger(__name__) + + +class MetricsProviderBase(ABC): + """Class for metric provider template + + Use this template to create your own metric provider. + + """ + + # General add metric function. 
Should return combined metrics Dict + @abstractmethod + def add_metric(self, *args, **kwargs): + pass + + # serialize and return dict for flushing + @abstractmethod + def serialize(self, *args, **kwargs): + pass + + # flush serialized data to output, or send to API directly + @abstractmethod + def flush(self, *args, **kwargs): + pass + + +class MetricsBase(ABC): + """Class for metric template + + Use this template to create your own metric class. + + """ + + @abstractmethod + def add_metric(self, *args, **kwargs): + pass + + @abstractmethod + def flush_metrics(self, raise_on_empty_metrics: bool = False) -> None: + pass + + def log_metrics( + self, + lambda_handler: Union[Callable[[Dict, Any], Any], Optional[Callable[[Dict, Any, Optional[Dict]], Any]]] = None, + capture_cold_start_metric: bool = False, + raise_on_empty_metrics: bool = False, + ): + """Decorator to serialize and publish metrics at the end of a function execution. + + Be aware that the log_metrics **does call* the decorated function (e.g. lambda_handler). + + Example + ------- + **Lambda function using tracer and metrics decorators** + + from aws_lambda_powertools import Metrics, Tracer + + metrics = Metrics(service="payment") + tracer = Tracer(service="payment") + + @tracer.capture_lambda_handler + @metrics.log_metrics + def handler(event, context): + ... 
+ + Parameters + ---------- + lambda_handler : Callable[[Any, Any], Any], optional + lambda function handler, by default None + capture_cold_start_metric : bool, optional + captures cold start metric, by default False + raise_on_empty_metrics : bool, optional + raise exception if no metrics are emitted, by default False + default_dimensions: Dict[str, str], optional + metric dimensions as key=value that will always be present + + Raises + ------ + e + Propagate error received + """ + + # If handler is None we've been called with parameters + # Return a partial function with args filled + if lambda_handler is None: + logger.debug("Decorator called with parameters") + return functools.partial( + self.log_metrics, + capture_cold_start_metric=capture_cold_start_metric, + raise_on_empty_metrics=raise_on_empty_metrics, + ) + + @functools.wraps(lambda_handler) + def decorate(event, context): + try: + response = lambda_handler(event, context) + if capture_cold_start_metric: + self._add_cold_start_metric(context=context) + finally: + self.flush_metrics(raise_on_empty_metrics=raise_on_empty_metrics) + + return response + + return decorate + + def _add_cold_start_metric(self, context: Any) -> None: + """Add cold start metric and function_name dimension + + Parameters + ---------- + context : Any + Lambda context + """ + global is_cold_start + if not is_cold_start: + return + + logger.debug("Adding cold start metric and function_name dimension") + self.add_metric(name="ColdStart", value=1) + + is_cold_start = False diff --git a/aws_lambda_powertools/metrics/provider/datadog_provider_draft.py b/aws_lambda_powertools/metrics/provider/datadog_provider_draft.py new file mode 100644 index 00000000000..6945185d6a7 --- /dev/null +++ b/aws_lambda_powertools/metrics/provider/datadog_provider_draft.py @@ -0,0 +1,96 @@ +from __future__ import annotations + +import json +import logging +import numbers +import time +import warnings +from typing import Dict, List + +from 
aws_lambda_powertools.metrics.exceptions import MetricValueError +from aws_lambda_powertools.metrics.provider import MetricsBase, MetricsProviderBase + +logger = logging.getLogger(__name__) + +# Check if using layer +try: + from datadog import lambda_metric +except ImportError: + lambda_metric = None + + +class DataDogProvider(MetricsProviderBase): + """Class for datadog provider.""" + + def __init__(self, namespace): + self.metrics = [] + self.namespace = namespace + super().__init__() + + # adding timestamp, tags. unit, resolution, name will not be used + def add_metric(self, name, value, timestamp, tag: List = None): + if not isinstance(value, numbers.Real): + raise MetricValueError(f"{value} is not a valid number") + if not timestamp: + timestamp = time.time() + self.metrics.append({"m": name, "v": float(value), "e": timestamp, "t": []}) + + # serialize for flushing + def serialize(self) -> Dict: + # logic here is to add dimension and metadata to each metric's tag with "key:value" format + extra_tags = [] + output_list = [] + + for single_metric in self.metrics: + output_list.append( + { + "m": f"{self.namespace}.{single_metric['m']}", + "v": single_metric["v"], + "e": single_metric["e"], + "t": single_metric["t"] + extra_tags, + } + ) + + return {"List": output_list} + + # flush serialized data to output + def flush(self, metrics): + # submit through datadog extension + if lambda_metric: + for metric_item in metrics.get("List"): + lambda_metric( + metric_name=metric_item["m"], + value=metric_item["v"], + timestamp=metric_item["e"], + tags=metric_item["t"], + ) + # flush to log with datadog format + # https://github.com/DataDog/datadog-lambda-python/blob/main/datadog_lambda/metric.py#L77 + else: + for metric_item in metrics.get("List"): + print(json.dumps(metric_item, separators=(",", ":"))) + + def clear(self): + self.metrics = [] + + +class DataDogMetrics(MetricsBase): + """Class for datadog metrics.""" + + def __init__(self, provider): + self.provider = 
provider + super().__init__() + + def add_metric(self, name: str, value: float, timestamp: time, tags: List = None): + self.provider.add_metric(name, value, timestamp, tags) + + def flush_metrics(self, raise_on_empty_metrics: bool = False) -> None: + metrics = self.provider.serialize() + if not metrics and raise_on_empty_metrics: + warnings.warn( + "No application metrics to publish. The cold-start metric may be published if enabled. " + "If application metrics should never be empty, consider using 'raise_on_empty_metrics'", + stacklevel=2, + ) + self.provider.flush(metrics) + self.provider.clear() From 06c9c8e27860d41a72609114b46e90b9e86a3f61 Mon Sep 17 00:00:00 2001 From: Roger Zhang Date: Tue, 30 May 2023 14:37:18 -0700 Subject: [PATCH 02/32] fix static checking error --- aws_lambda_powertools/metrics/provider/base.py | 2 ++ .../metrics/provider/datadog_provider_draft.py | 10 +++++----- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/aws_lambda_powertools/metrics/provider/base.py b/aws_lambda_powertools/metrics/provider/base.py index 9c91a238510..2afb2d68dfd 100644 --- a/aws_lambda_powertools/metrics/provider/base.py +++ b/aws_lambda_powertools/metrics/provider/base.py @@ -5,6 +5,8 @@ logger = logging.getLogger(__name__) +is_cold_start = True + class MetricsProviderBase(ABC): """Class for metric provider template diff --git a/aws_lambda_powertools/metrics/provider/datadog_provider_draft.py b/aws_lambda_powertools/metrics/provider/datadog_provider_draft.py index 6945185d6a7..f587659f10d 100644 --- a/aws_lambda_powertools/metrics/provider/datadog_provider_draft.py +++ b/aws_lambda_powertools/metrics/provider/datadog_provider_draft.py @@ -14,7 +14,7 @@ # Check if using layer try: - from datadog import lambda_metric + from datadog_lambda.metric import lambda_metric except ImportError: lambda_metric = None @@ -28,7 +28,7 @@ def __init__(self, namespace): super().__init__() # adding timestamp, tags. 
unit, resolution, name will not be used - def add_metric(self, name, value, timestamp, tag: List = None): + def add_metric(self, name: str, value: float, timestamp: float, tag: List): if not isinstance(value, numbers.Real): raise MetricValueError(f"{value} is not a valid number") if not timestamp: @@ -38,8 +38,8 @@ def add_metric(self, name, value, timestamp, tag: List = None): # serialize for flushing def serialize(self) -> Dict: # logic here is to add dimension and metadata to each metric's tag with "key:value" format - extra_tags = [] - output_list = [] + extra_tags: List = [] + output_list: List = [] for single_metric in self.metrics: output_list.append( @@ -81,7 +81,7 @@ def __init__(self, provider): self.provider = provider super().__init__() - def add_metric(self, name: str, value: float, timestamp: time, tags: List = None): + def add_metric(self, name: str, value: float, timestamp: float, tags: List): self.provider.add_metric(name, value, timestamp, tags) def flush_metrics(self, raise_on_empty_metrics: bool = False) -> None: From 10514f7507dfd64c5928350d1d0dfbc9307327a0 Mon Sep 17 00:00:00 2001 From: Roger Zhang Date: Tue, 30 May 2023 14:48:30 -0700 Subject: [PATCH 03/32] fix static checking error --- .../metrics/provider/datadog_provider_draft.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aws_lambda_powertools/metrics/provider/datadog_provider_draft.py b/aws_lambda_powertools/metrics/provider/datadog_provider_draft.py index f587659f10d..20ecca860fb 100644 --- a/aws_lambda_powertools/metrics/provider/datadog_provider_draft.py +++ b/aws_lambda_powertools/metrics/provider/datadog_provider_draft.py @@ -14,7 +14,7 @@ # Check if using layer try: - from datadog_lambda.metric import lambda_metric + from datadog_lambda.metric import lambda_metric # type: ignore except ImportError: lambda_metric = None From c9b018f269123257e746c515b53931d4a9bc0195 Mon Sep 17 00:00:00 2001 From: Roger Zhang Date: Thu, 1 Jun 2023 15:15:35 -0700 Subject: [PATCH 
04/32] optimize docstring --- .../provider/datadog_provider_draft.py | 44 ++++++++++++++----- 1 file changed, 33 insertions(+), 11 deletions(-) diff --git a/aws_lambda_powertools/metrics/provider/datadog_provider_draft.py b/aws_lambda_powertools/metrics/provider/datadog_provider_draft.py index 20ecca860fb..7aa82c82ca5 100644 --- a/aws_lambda_powertools/metrics/provider/datadog_provider_draft.py +++ b/aws_lambda_powertools/metrics/provider/datadog_provider_draft.py @@ -5,14 +5,14 @@ import numbers import time import warnings -from typing import Dict, List +from typing import Dict, List, Optional from aws_lambda_powertools.metrics.exceptions import MetricValueError from aws_lambda_powertools.metrics.provider import MetricsBase, MetricsProviderBase logger = logging.getLogger(__name__) -# Check if using layer +# Check if using datadog layer try: from datadog_lambda.metric import lambda_metric # type: ignore except ImportError: @@ -20,20 +20,30 @@ class DataDogProvider(MetricsProviderBase): - """Class for datadog provider.""" + """Class for datadog provider. + all datadog metric data will be stored as + see https://github.com/DataDog/datadog-lambda-python/blob/main/datadog_lambda/metric.py#L77 + { + "m": metric_name, + "v": value, + "e": timestamp + "t": List["tag:value","tag2:value2"] + } + """ def __init__(self, namespace): self.metrics = [] self.namespace = namespace super().__init__() - # adding timestamp, tags. 
unit, resolution, name will not be used - def add_metric(self, name: str, value: float, timestamp: float, tag: List): + # adding name,value,timestamp,tags + # consider directly calling lambda_metric function here + def add_metric(self, name: str, value: float, timestamp: Optional[int] = None, tags: Optional[List] = None): if not isinstance(value, numbers.Real): raise MetricValueError(f"{value} is not a valid number") if not timestamp: timestamp = time.time() - self.metrics.append({"m": name, "v": float(value), "e": timestamp, "t": []}) + self.metrics.append({"m": name, "v": int(value), "e": timestamp, "t": tags}) # serialize for flushing def serialize(self) -> Dict: @@ -57,6 +67,7 @@ def serialize(self) -> Dict: def flush(self, metrics): # submit through datadog extension if lambda_metric: + # use lambda_metric function from datadog package, submit metrics to datadog for metric_item in metrics.get("List"): lambda_metric( metric_name=metric_item["m"], @@ -64,9 +75,9 @@ def flush(self, metrics): timestamp=metric_item["e"], tags=metric_item["t"], ) - # flush to log with datadog format - # https://github.com/DataDog/datadog-lambda-python/blob/main/datadog_lambda/metric.py#L77 else: + # flush to log with datadog format + # https://github.com/DataDog/datadog-lambda-python/blob/main/datadog_lambda/metric.py#L77 for metric_item in metrics.get("List"): print(json.dumps(metric_item, separators=(",", ":"))) @@ -75,14 +86,25 @@ def clear(self): class DataDogMetrics(MetricsBase): - """Class for datadog metrics.""" + """Class for datadog metrics standalone class. 
+ Example + ------- + dd_provider = DataDogProvider(namespace="default") + metrics = DataDogMetrics(provider=dd_provider) + + @metrics.log_metrics(capture_cold_start_metric: bool = True, raise_on_empty_metrics: bool = False) + def lambda_handler(event, context) + metrics.add_metric(name="item_sold",value=1,tags) + """ + + # `log_metrics` and `_add_cold_start_metric` are directly inherited from `MetricsBase` def __init__(self, provider): self.provider = provider super().__init__() - def add_metric(self, name: str, value: float, timestamp: float, tags: List): - self.provider.add_metric(name, value, timestamp, tags) + def add_metric(self, name: str, value: float, timestamp: Optional[int] = None, tags: Optional[List] = None): + self.provider.add_metric(name=name, value=value, timestamp=timestamp, tags=tags) def flush_metrics(self, raise_on_empty_metrics: bool = False) -> None: metrics = self.provider.serialize() From 58bc274f08f27765d51bb1963a03f7bfab656820 Mon Sep 17 00:00:00 2001 From: Roger Zhang Date: Mon, 12 Jun 2023 16:46:20 -0700 Subject: [PATCH 05/32] add alias CloudWatchEMF to original Metrics class --- aws_lambda_powertools/metrics/__init__.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/aws_lambda_powertools/metrics/__init__.py b/aws_lambda_powertools/metrics/__init__.py index 5f30f14102d..a872ba9d934 100644 --- a/aws_lambda_powertools/metrics/__init__.py +++ b/aws_lambda_powertools/metrics/__init__.py @@ -8,7 +8,7 @@ SchemaValidationError, ) from .metric import single_metric -from .metrics import EphemeralMetrics, Metrics +from .metrics import CloudWatchEMF, EphemeralMetrics, Metrics __all__ = [ "Metrics", @@ -20,4 +20,5 @@ "MetricResolutionError", "SchemaValidationError", "MetricValueError", + "CloudWatchEMF", ] From 4de8842136a9beccdf5275f46b059a5917696c45 Mon Sep 17 00:00:00 2001 From: Roger Zhang Date: Mon, 12 Jun 2023 16:46:52 -0700 Subject: [PATCH 06/32] add alias CloudWatchEMF to original Metrics class --- 
aws_lambda_powertools/metrics/metrics.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/aws_lambda_powertools/metrics/metrics.py b/aws_lambda_powertools/metrics/metrics.py index 085ebf9053f..12d81cf4446 100644 --- a/aws_lambda_powertools/metrics/metrics.py +++ b/aws_lambda_powertools/metrics/metrics.py @@ -119,6 +119,10 @@ def clear_metrics(self) -> None: self.set_default_dimensions(**self.default_dimensions) +# add alias for original EMF format to make the provider more explicit +CloudWatchEMF = Metrics + + class EphemeralMetrics(MetricManager): """Non-singleton version of Metrics to not persist metrics across instances From fedb3a09bbdfd0174d781d330a1831e04105ebbd Mon Sep 17 00:00:00 2001 From: Roger Zhang Date: Tue, 13 Jun 2023 11:35:51 -0700 Subject: [PATCH 07/32] Move Metrics to Provider --- aws_lambda_powertools/metrics/__init__.py | 2 +- aws_lambda_powertools/metrics/metrics.py | 138 +----------------- .../metrics/provider/__init__.py | 8 + .../metrics/provider/cloudwatch_emf.py | 136 +++++++++++++++++ .../provider/datadog_provider_draft.py | 28 ++-- 5 files changed, 163 insertions(+), 149 deletions(-) create mode 100644 aws_lambda_powertools/metrics/provider/cloudwatch_emf.py diff --git a/aws_lambda_powertools/metrics/__init__.py b/aws_lambda_powertools/metrics/__init__.py index a872ba9d934..8227ca410d0 100644 --- a/aws_lambda_powertools/metrics/__init__.py +++ b/aws_lambda_powertools/metrics/__init__.py @@ -8,7 +8,7 @@ SchemaValidationError, ) from .metric import single_metric -from .metrics import CloudWatchEMF, EphemeralMetrics, Metrics +from .provider.cloudwatch_emf import CloudWatchEMF, EphemeralMetrics, Metrics __all__ = [ "Metrics", diff --git a/aws_lambda_powertools/metrics/metrics.py b/aws_lambda_powertools/metrics/metrics.py index 12d81cf4446..0c8ac73cdd1 100644 --- a/aws_lambda_powertools/metrics/metrics.py +++ b/aws_lambda_powertools/metrics/metrics.py @@ -1,136 +1,4 @@ -from typing import Any, Dict, Optional +# NOTE: keeps for 
compatibility +from .provider.cloudwatch_emf import EphemeralMetrics, Metrics -from .base import MetricManager - - -class Metrics(MetricManager): - """Metrics create an EMF object with up to 100 metrics - - Use Metrics when you need to create multiple metrics that have - dimensions in common (e.g. service_name="payment"). - - Metrics up to 100 metrics in memory and are shared across - all its instances. That means it can be safely instantiated outside - of a Lambda function, or anywhere else. - - A decorator (log_metrics) is provided so metrics are published at the end of its execution. - If more than 100 metrics are added at a given function execution, - these metrics are serialized and published before adding a given metric - to prevent metric truncation. - - Example - ------- - **Creates a few metrics and publish at the end of a function execution** - - from aws_lambda_powertools import Metrics - - metrics = Metrics(namespace="ServerlessAirline", service="payment") - - @metrics.log_metrics(capture_cold_start_metric=True) - def lambda_handler(): - metrics.add_metric(name="BookingConfirmation", unit="Count", value=1) - metrics.add_dimension(name="function_version", value="$LATEST") - - return True - - Environment variables - --------------------- - POWERTOOLS_METRICS_NAMESPACE : str - metric namespace - POWERTOOLS_SERVICE_NAME : str - service name used for default dimension - - Parameters - ---------- - service : str, optional - service name to be used as metric dimension, by default "service_undefined" - namespace : str, optional - Namespace for metrics - - Raises - ------ - MetricUnitError - When metric unit isn't supported by CloudWatch - MetricResolutionError - When metric resolution isn't supported by CloudWatch - MetricValueError - When metric value isn't a number - SchemaValidationError - When metric object fails EMF schema validation - """ - - # NOTE: We use class attrs to share metrics data across instances - # this allows customers to initialize 
Metrics() throughout their code base (and middlewares) - # and not get caught by accident with metrics data loss, or data deduplication - # e.g., m1 and m2 add metric ProductCreated, however m1 has 'version' dimension but m2 doesn't - # Result: ProductCreated is created twice as we now have 2 different EMF blobs - _metrics: Dict[str, Any] = {} - _dimensions: Dict[str, str] = {} - _metadata: Dict[str, Any] = {} - _default_dimensions: Dict[str, Any] = {} - - def __init__(self, service: Optional[str] = None, namespace: Optional[str] = None): - self.metric_set = self._metrics - self.metadata_set = self._metadata - self.default_dimensions = self._default_dimensions - self.dimension_set = self._dimensions - - self.dimension_set.update(**self._default_dimensions) - return super().__init__( - namespace=namespace, - service=service, - metric_set=self.metric_set, - dimension_set=self.dimension_set, - metadata_set=self.metadata_set, - ) - - def set_default_dimensions(self, **dimensions) -> None: - """Persist dimensions across Lambda invocations - - Parameters - ---------- - dimensions : Dict[str, Any], optional - metric dimensions as key=value - - Example - ------- - **Sets some default dimensions that will always be present across metrics and invocations** - - from aws_lambda_powertools import Metrics - - metrics = Metrics(namespace="ServerlessAirline", service="payment") - metrics.set_default_dimensions(environment="demo", another="one") - - @metrics.log_metrics() - def lambda_handler(): - return True - """ - for name, value in dimensions.items(): - self.add_dimension(name, value) - - self.default_dimensions.update(**dimensions) - - def clear_default_dimensions(self) -> None: - self.default_dimensions.clear() - - def clear_metrics(self) -> None: - super().clear_metrics() - # re-add default dimensions - self.set_default_dimensions(**self.default_dimensions) - - -# add alias for original EMF format to make the provider more explicit -CloudWatchEMF = Metrics - - -class 
EphemeralMetrics(MetricManager): - """Non-singleton version of Metrics to not persist metrics across instances - - NOTE: This is useful when you want to: - - - Create metrics for distinct namespaces - - Create the same metrics with different dimensions more than once - """ - - def __init__(self, service: Optional[str] = None, namespace: Optional[str] = None): - super().__init__(namespace=namespace, service=service) +__all__ = ["Metrics", "EphemeralMetrics"] diff --git a/aws_lambda_powertools/metrics/provider/__init__.py b/aws_lambda_powertools/metrics/provider/__init__.py index 93ce6df91fe..822c0275264 100644 --- a/aws_lambda_powertools/metrics/provider/__init__.py +++ b/aws_lambda_powertools/metrics/provider/__init__.py @@ -1,4 +1,9 @@ from aws_lambda_powertools.metrics.provider.base import MetricsBase, MetricsProviderBase +from aws_lambda_powertools.metrics.provider.cloudwatch_emf import ( + CloudWatchEMF, + EphemeralMetrics, + Metrics, +) from aws_lambda_powertools.metrics.provider.datadog_provider_draft import ( DataDogMetrics, DataDogProvider, @@ -9,4 +14,7 @@ "MetricsProviderBase", "DataDogMetrics", "DataDogProvider", + "Metrics", + "EphemeralMetrics", + "CloudWatchEMF", ] diff --git a/aws_lambda_powertools/metrics/provider/cloudwatch_emf.py b/aws_lambda_powertools/metrics/provider/cloudwatch_emf.py new file mode 100644 index 00000000000..777d4ca35b8 --- /dev/null +++ b/aws_lambda_powertools/metrics/provider/cloudwatch_emf.py @@ -0,0 +1,136 @@ +from typing import Any, Dict, Optional + +from aws_lambda_powertools.metrics.base import MetricManager + + +class Metrics(MetricManager): + """Metrics create an EMF object with up to 100 metrics + + Use Metrics when you need to create multiple metrics that have + dimensions in common (e.g. service_name="payment"). + + Metrics up to 100 metrics in memory and are shared across + all its instances. That means it can be safely instantiated outside + of a Lambda function, or anywhere else. 
+ + A decorator (log_metrics) is provided so metrics are published at the end of its execution. + If more than 100 metrics are added at a given function execution, + these metrics are serialized and published before adding a given metric + to prevent metric truncation. + + Example + ------- + **Creates a few metrics and publish at the end of a function execution** + + from aws_lambda_powertools import Metrics + + metrics = Metrics(namespace="ServerlessAirline", service="payment") + + @metrics.log_metrics(capture_cold_start_metric=True) + def lambda_handler(): + metrics.add_metric(name="BookingConfirmation", unit="Count", value=1) + metrics.add_dimension(name="function_version", value="$LATEST") + + return True + + Environment variables + --------------------- + POWERTOOLS_METRICS_NAMESPACE : str + metric namespace + POWERTOOLS_SERVICE_NAME : str + service name used for default dimension + + Parameters + ---------- + service : str, optional + service name to be used as metric dimension, by default "service_undefined" + namespace : str, optional + Namespace for metrics + + Raises + ------ + MetricUnitError + When metric unit isn't supported by CloudWatch + MetricResolutionError + When metric resolution isn't supported by CloudWatch + MetricValueError + When metric value isn't a number + SchemaValidationError + When metric object fails EMF schema validation + """ + + # NOTE: We use class attrs to share metrics data across instances + # this allows customers to initialize Metrics() throughout their code base (and middlewares) + # and not get caught by accident with metrics data loss, or data deduplication + # e.g., m1 and m2 add metric ProductCreated, however m1 has 'version' dimension but m2 doesn't + # Result: ProductCreated is created twice as we now have 2 different EMF blobs + _metrics: Dict[str, Any] = {} + _dimensions: Dict[str, str] = {} + _metadata: Dict[str, Any] = {} + _default_dimensions: Dict[str, Any] = {} + + def __init__(self, service: Optional[str] = 
None, namespace: Optional[str] = None): + self.metric_set = self._metrics + self.metadata_set = self._metadata + self.default_dimensions = self._default_dimensions + self.dimension_set = self._dimensions + + self.dimension_set.update(**self._default_dimensions) + return super().__init__( + namespace=namespace, + service=service, + metric_set=self.metric_set, + dimension_set=self.dimension_set, + metadata_set=self.metadata_set, + ) + + def set_default_dimensions(self, **dimensions) -> None: + """Persist dimensions across Lambda invocations + + Parameters + ---------- + dimensions : Dict[str, Any], optional + metric dimensions as key=value + + Example + ------- + **Sets some default dimensions that will always be present across metrics and invocations** + + from aws_lambda_powertools import Metrics + + metrics = Metrics(namespace="ServerlessAirline", service="payment") + metrics.set_default_dimensions(environment="demo", another="one") + + @metrics.log_metrics() + def lambda_handler(): + return True + """ + for name, value in dimensions.items(): + self.add_dimension(name, value) + + self.default_dimensions.update(**dimensions) + + def clear_default_dimensions(self) -> None: + self.default_dimensions.clear() + + def clear_metrics(self) -> None: + super().clear_metrics() + # re-add default dimensions + self.set_default_dimensions(**self.default_dimensions) + + +# add alias for original EMF format to make the provider more explicit +CloudWatchEMF = Metrics + + +class EphemeralMetrics(MetricManager): + """Non-singleton version of Metrics to not persist metrics across instances + + NOTE: This is useful when you want to: + + - Create metrics for distinct namespaces + - Create the same metrics with different dimensions more than once + """ + + def __init__(self, service: Optional[str] = None, namespace: Optional[str] = None): + super().__init__(namespace=namespace, service=service) diff --git a/aws_lambda_powertools/metrics/provider/datadog_provider_draft.py 
b/aws_lambda_powertools/metrics/provider/datadog_provider_draft.py index 7aa82c82ca5..eefa6083c2d 100644 --- a/aws_lambda_powertools/metrics/provider/datadog_provider_draft.py +++ b/aws_lambda_powertools/metrics/provider/datadog_provider_draft.py @@ -5,7 +5,7 @@ import numbers import time import warnings -from typing import Dict, List, Optional +from typing import List, Optional from aws_lambda_powertools.metrics.exceptions import MetricValueError from aws_lambda_powertools.metrics.provider import MetricsBase, MetricsProviderBase @@ -31,9 +31,9 @@ class DataDogProvider(MetricsProviderBase): } """ - def __init__(self, namespace): - self.metrics = [] - self.namespace = namespace + def __init__(self, namespace: str = "default"): + self.metrics: List = [] + self.namespace: str = namespace super().__init__() # adding name,value,timestamp,tags @@ -45,10 +45,9 @@ def add_metric(self, name: str, value: float, timestamp: Optional[int] = None, t timestamp = time.time() self.metrics.append({"m": name, "v": int(value), "e": timestamp, "t": tags}) - # serialize for flushing - def serialize(self) -> Dict: + # serialize for flushing (Do we really need this function for datadog?) 
+ def serialize(self) -> List: # logic here is to add dimension and metadata to each metric's tag with "key:value" format - extra_tags: List = [] output_list: List = [] for single_metric in self.metrics: @@ -57,18 +56,18 @@ def serialize(self) -> Dict: "m": f"{self.namespace}.{single_metric['m']}", "v": single_metric["v"], "e": single_metric["e"], - "t": single_metric["t"] + extra_tags, + "t": single_metric["t"], } ) - return {"List": output_list} + return output_list # flush serialized data to output - def flush(self, metrics): + def flush(self, metrics: List): # submit through datadog extension if lambda_metric: # use lambda_metric function from datadog package, submit metrics to datadog - for metric_item in metrics.get("List"): + for metric_item in metrics: lambda_metric( metric_name=metric_item["m"], value=metric_item["v"], @@ -78,7 +77,7 @@ def flush(self, metrics): else: # flush to log with datadog format # https://github.com/DataDog/datadog-lambda-python/blob/main/datadog_lambda/metric.py#L77 - for metric_item in metrics.get("List"): + for metric_item in metrics: print(json.dumps(metric_item, separators=(",", ":"))) def clear(self): @@ -103,7 +102,10 @@ def __init__(self, provider): self.provider = provider super().__init__() - def add_metric(self, name: str, value: float, timestamp: Optional[int] = None, tags: Optional[List] = None): + # drop additional kwargs to keep same experience + def add_metric( + self, name: str, value: float, timestamp: Optional[int] = None, tags: Optional[List] = None, *args, **kwargs + ): self.provider.add_metric(name=name, value=value, timestamp=timestamp, tags=tags) def flush_metrics(self, raise_on_empty_metrics: bool = False) -> None: From 169f02c9ea5f02f4c5f4db2b7a54aa0b587b8b7e Mon Sep 17 00:00:00 2001 From: Roger Zhang Date: Thu, 15 Jun 2023 14:24:41 -0700 Subject: [PATCH 08/32] add sample document --- docs/core/metrics.md | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/docs/core/metrics.md 
b/docs/core/metrics.md index 6948a3d4c8d..7f06d152c32 100644 --- a/docs/core/metrics.md +++ b/docs/core/metrics.md @@ -317,6 +317,30 @@ These issues are exacerbated when you create **(A)** metric dimensions condition That is why `Metrics` shares data across instances by default, as that covers 80% of use cases and different personas using Powertools. This allows them to instantiate `Metrics` in multiple places throughout their code - be a separate file, a middleware, or an abstraction that sets default dimensions. +### Observability providers + +!!! In this context, an observability provider is an AWS Lambda Partner offering a platform for logging, metrics, traces, etc. + +You can send metrics to the observability provider of your choice via Lambda Extensions. However, the default +CloudWatch EMF Metrics format will not be accepted for most observability providers. + +#### Built-in providers + +In this case, we and our AWS Lambda Partners are offering built-in provider to make metrics submission easier. 
+ +You can import from metric provider package, init the provider and use them as default metrics class + +```python title="Using built-in Datadog Metrics Provider" +from aws_lambda_powertools.metrics.provider.datadog_provider_draft import DataDogProvider,DataDogMetrics + +dd_provider = DataDogProvider(namespace="default") +metrics = DataDogMetrics(provider=dd_provider) + +@metrics.log_metrics(capture_cold_start_metric: bool = True, raise_on_empty_metrics: bool = False) +def lambda_handler(event, context) + metrics.add_metric(name="item_sold",value=1,tags=["category:online"]) +``` + ## Testing your code ### Environment variables From 7bfd8748f32eb49693bdcf622f3153b305bbcb48 Mon Sep 17 00:00:00 2001 From: Roger Zhang Date: Thu, 15 Jun 2023 15:41:55 -0700 Subject: [PATCH 09/32] reformat code block --- docs/core/metrics.md | 13 +++---------- examples/metrics/src/use_providers.py | 12 ++++++++++++ 2 files changed, 15 insertions(+), 10 deletions(-) create mode 100644 examples/metrics/src/use_providers.py diff --git a/docs/core/metrics.md b/docs/core/metrics.md index 7f06d152c32..61609803d7e 100644 --- a/docs/core/metrics.md +++ b/docs/core/metrics.md @@ -328,17 +328,10 @@ CloudWatch EMF Metrics format will not be accepted for most observability provid In this case, we and our AWS Lambda Partners are offering built-in provider to make metrics submission easier. 
-You can import from metric provider package, init the provider and use them as default metrics class +You can import from metric provider package, init the provider and use them like the default metrics class -```python title="Using built-in Datadog Metrics Provider" -from aws_lambda_powertools.metrics.provider.datadog_provider_draft import DataDogProvider,DataDogMetrics - -dd_provider = DataDogProvider(namespace="default") -metrics = DataDogMetrics(provider=dd_provider) - -@metrics.log_metrics(capture_cold_start_metric: bool = True, raise_on_empty_metrics: bool = False) -def lambda_handler(event, context) - metrics.add_metric(name="item_sold",value=1,tags=["category:online"]) +```python hl_lines="1 3 4" title="Using built-in Datadog Metrics Provider" +--8<-- "examples/metrics/src/use_providers.py" ``` ## Testing your code diff --git a/examples/metrics/src/use_providers.py b/examples/metrics/src/use_providers.py new file mode 100644 index 00000000000..e7260d084c7 --- /dev/null +++ b/examples/metrics/src/use_providers.py @@ -0,0 +1,12 @@ +from aws_lambda_powertools.metrics.provider.datadog_provider_draft import ( + DataDogMetrics, + DataDogProvider, +) + +dd_provider = DataDogProvider(namespace="default") +metrics = DataDogMetrics(provider=dd_provider) + + +@metrics.log_metrics(capture_cold_start_metric=True, raise_on_empty_metrics=False) +def lambda_handler(event, context): + metrics.add_metric(name="item_sold", value=1, tags=["category:online"]) From f26e26dd0fc393521a2b38c3eebdd1449a06ebbc Mon Sep 17 00:00:00 2001 From: Roger Zhang Date: Tue, 20 Jun 2023 13:20:52 -0700 Subject: [PATCH 10/32] add OTel provider draft for poc --- .../metrics/provider/__init__.py | 6 ++ .../provider/opentelemetry_provider_draft.py | 100 ++++++++++++++++++ 2 files changed, 106 insertions(+) create mode 100644 aws_lambda_powertools/metrics/provider/opentelemetry_provider_draft.py diff --git a/aws_lambda_powertools/metrics/provider/__init__.py 
b/aws_lambda_powertools/metrics/provider/__init__.py index 822c0275264..3f7451f88f0 100644 --- a/aws_lambda_powertools/metrics/provider/__init__.py +++ b/aws_lambda_powertools/metrics/provider/__init__.py @@ -8,6 +8,10 @@ DataDogMetrics, DataDogProvider, ) +from aws_lambda_powertools.metrics.provider.opentelemetry_provider_draft import ( + OTLPMetrics, + OTLPProvider, +) __all__ = [ "MetricsBase", @@ -17,4 +21,6 @@ "Metrics", "EphemeralMetrics", "CloudWatchEMF", + "OTLPProvider", + "OTLPMetrics", ] diff --git a/aws_lambda_powertools/metrics/provider/opentelemetry_provider_draft.py b/aws_lambda_powertools/metrics/provider/opentelemetry_provider_draft.py new file mode 100644 index 00000000000..f78a1bf8bb3 --- /dev/null +++ b/aws_lambda_powertools/metrics/provider/opentelemetry_provider_draft.py @@ -0,0 +1,100 @@ +from __future__ import annotations + +import logging +import numbers +import warnings +from typing import Dict, Optional + +from aws_lambda_powertools.metrics.exceptions import MetricValueError +from aws_lambda_powertools.metrics.provider import MetricsBase, MetricsProviderBase + +logger = logging.getLogger(__name__) + +# Check if using datadog layer +try: + from opentelemetry import metrics # type: ignore + from opentelemetry.exporter.otlp.proto.grpc.metric_exporter import ( + OTLPMetricExporter, # type: ignore + ) + from opentelemetry.sdk.metrics import MeterProvider # type: ignore + from opentelemetry.sdk.metrics.export import ( + PeriodicExportingMetricReader, # type: ignore + ) + from opentelemetry.sdk.resources import SERVICE_NAME, Resource # type: ignore + + otlp_metrics = True +except ImportError: + otlp_metrics = False + + +class OTLPProvider(MetricsProviderBase): + """ + Class for OTLP provider. 
+
+    """
+
+    def __init__(self, namespace: str = "default", endpoint: str = "localhost:4317"):
+        if not otlp_metrics:
+            raise Exception("OTLP package not found")
+        resource = Resource(attributes={SERVICE_NAME: namespace})
+
+        reader = PeriodicExportingMetricReader(OTLPMetricExporter(endpoint=endpoint))
+        provider = MeterProvider(resource=resource, metric_readers=[reader])
+        # Sets the global default meter provider
+        metrics.set_meter_provider(provider)
+        self.meter = metrics.get_meter(namespace)
+        self.counters: Dict = {}
+        super().__init__()
+
+    # adding name,value,unit,tags
+    # consider calling the OTel counter's add() directly here
+    def add_metric(self, name: str, value: float, unit: Optional[str] = "1", tags: Optional[Dict] = None):
+        if not isinstance(value, numbers.Real):
+            raise MetricValueError(f"{value} is not a valid number")
+        if name not in self.counters:
+            self.counters[name] = self.meter.create_counter(name=name, unit=unit)
+        self.counters[name].add(amount=value, attributes=tags)
+
+    # serialize for flushing (Do we really need this function for OTLP?)
+    def serialize(self):
+        # not implemented
+        pass
+
+    # flush serialized data to output
+    def flush(self):
+        # not implemented
+        pass
+
+
+class OTLPMetrics(MetricsBase):
+    """Class for OTLP metrics standalone class.
+
+    Example
+    -------
+    otlp_provider = OTLPProvider(namespace="default")
+    metrics = OTLPMetrics(provider=otlp_provider)
+
+    @metrics.log_metrics(capture_cold_start_metric=True, raise_on_empty_metrics=False)
+    def lambda_handler(event, context):
+        metrics.add_metric(name="item_sold", value=1, tags={"category": "online"})
+    """
+
+    # `log_metrics` and `_add_cold_start_metric` are directly inherited from `MetricsBase`
+    def __init__(self, provider):
+        self.provider = provider
+        super().__init__()
+
+    # drop additional kwargs to keep same experience
+    def add_metric(
+        self, name: str, value: float, unit: Optional[str] = "1", tags: Optional[Dict] = None, *args, **kwargs
+    ):
+        self.provider.add_metric(name=name, value=value, unit=unit, tags=tags)
+
+    def flush_metrics(self, raise_on_empty_metrics: bool = False) -> None:
+        if not self.provider.counters and raise_on_empty_metrics:
+            warnings.warn(
+                "No application metrics to publish. The cold-start metric may be published if enabled. "
+                "If application metrics should never be empty, consider using 'raise_on_empty_metrics'",
+                stacklevel=2,
+            )
+        # not implemented for OTLP

From 5659cddefa32e420b199f59d302eac61e8d2f62a Mon Sep 17 00:00:00 2001
From: Leandro Damascena
Date: Tue, 27 Jun 2023 23:19:38 +0100
Subject: [PATCH 11/32] rebasing from upstream

---
 aws_lambda_powertools/metrics/provider/cloudwatch_emf.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/aws_lambda_powertools/metrics/provider/cloudwatch_emf.py b/aws_lambda_powertools/metrics/provider/cloudwatch_emf.py
index 777d4ca35b8..94ba62894a0 100644
--- a/aws_lambda_powertools/metrics/provider/cloudwatch_emf.py
+++ b/aws_lambda_powertools/metrics/provider/cloudwatch_emf.py
@@ -76,7 +76,7 @@ def __init__(self, service: Optional[str] = None, namespace: Optional[str] = Non
         self.dimension_set = self._dimensions
         self.dimension_set.update(**self._default_dimensions)
 
-        return super().__init__(
+        super().__init__(
            namespace=namespace,
            service=service,
metric_set=self.metric_set, From 1dab8d131287f5f5d46e5f6a5907e1fc4466afca Mon Sep 17 00:00:00 2001 From: Leandro Damascena Date: Tue, 27 Jun 2023 23:29:18 +0100 Subject: [PATCH 12/32] rebasing from upstream --- .../provider/datadog_provider_draft.py | 10 +- .../provider/opentelemetry_provider_draft.py | 18 +- poetry.lock | 252 +++++++++++++++++- pyproject.toml | 2 + 4 files changed, 265 insertions(+), 17 deletions(-) diff --git a/aws_lambda_powertools/metrics/provider/datadog_provider_draft.py b/aws_lambda_powertools/metrics/provider/datadog_provider_draft.py index eefa6083c2d..4b5511243e4 100644 --- a/aws_lambda_powertools/metrics/provider/datadog_provider_draft.py +++ b/aws_lambda_powertools/metrics/provider/datadog_provider_draft.py @@ -57,7 +57,7 @@ def serialize(self) -> List: "v": single_metric["v"], "e": single_metric["e"], "t": single_metric["t"], - } + }, ) return output_list @@ -104,7 +104,13 @@ def __init__(self, provider): # drop additional kwargs to keep same experience def add_metric( - self, name: str, value: float, timestamp: Optional[int] = None, tags: Optional[List] = None, *args, **kwargs + self, + name: str, + value: float, + timestamp: Optional[int] = None, + tags: Optional[List] = None, + *args, + **kwargs, ): self.provider.add_metric(name=name, value=value, timestamp=timestamp, tags=tags) diff --git a/aws_lambda_powertools/metrics/provider/opentelemetry_provider_draft.py b/aws_lambda_powertools/metrics/provider/opentelemetry_provider_draft.py index f78a1bf8bb3..75cf52d599b 100644 --- a/aws_lambda_powertools/metrics/provider/opentelemetry_provider_draft.py +++ b/aws_lambda_powertools/metrics/provider/opentelemetry_provider_draft.py @@ -12,15 +12,15 @@ # Check if using datadog layer try: - from opentelemetry import metrics # type: ignore + from opentelemetry import metrics from opentelemetry.exporter.otlp.proto.grpc.metric_exporter import ( - OTLPMetricExporter, # type: ignore + OTLPMetricExporter, ) - from opentelemetry.sdk.metrics import 
MeterProvider # type: ignore + from opentelemetry.sdk.metrics import MeterProvider from opentelemetry.sdk.metrics.export import ( - PeriodicExportingMetricReader, # type: ignore + PeriodicExportingMetricReader, ) - from opentelemetry.sdk.resources import SERVICE_NAME, Resource # type: ignore + from opentelemetry.sdk.resources import SERVICE_NAME, Resource otlp_metrics = True except ImportError: @@ -86,7 +86,13 @@ def __init__(self, provider): # drop additional kwargs to keep same experience def add_metric( - self, name: str, value: float, unit: Optional[str] = "1", tags: Optional[Dict] = None, *args, **kwargs + self, + name: str, + value: float, + unit: Optional[str] = "1", + tags: Optional[Dict] = None, + *args, + **kwargs, ): self.provider.add_metric(name=name, value=value, unit=unit, tags=tags) diff --git a/poetry.lock b/poetry.lock index 7430c73a30b..06d63395315 100644 --- a/poetry.lock +++ b/poetry.lock @@ -216,6 +216,17 @@ files = [ botocore = ">=1.11.3" wrapt = "*" +[[package]] +name = "backoff" +version = "2.2.1" +description = "Function decoration for backoff and retry" +optional = false +python-versions = ">=3.7,<4.0" +files = [ + {file = "backoff-2.2.1-py3-none-any.whl", hash = "sha256:63579f9a0628e06278f7e47b7d7d5b6ce20dc65c5e96a6f3ca99a6adca0396e8"}, + {file = "backoff-2.2.1.tar.gz", hash = "sha256:03f829f5bb1923180821643f8753b0502c3b682293992485b0eef2807afa5cba"}, +] + [[package]] name = "bandit" version = "1.7.5" @@ -609,6 +620,23 @@ files = [ {file = "decorator-5.1.1.tar.gz", hash = "sha256:637996211036b6385ef91435e4fae22989472f9d571faba8927ba8253acbc330"}, ] +[[package]] +name = "deprecated" +version = "1.2.14" +description = "Python @deprecated decorator to deprecate old python classes, functions or methods." 
+optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +files = [ + {file = "Deprecated-1.2.14-py2.py3-none-any.whl", hash = "sha256:6fac8b097794a90302bdbb17b9b815e732d3c4720583ff1b198499d78470466c"}, + {file = "Deprecated-1.2.14.tar.gz", hash = "sha256:e5323eb936458dccc2582dc6f9c322c852a775a27065ff2b0c4970b9d53d01b3"}, +] + +[package.dependencies] +wrapt = ">=1.10,<2" + +[package.extras] +dev = ["PyTest", "PyTest-Cov", "bump2version (<1)", "sphinx (<2)", "tox"] + [[package]] name = "exceptiongroup" version = "1.1.1" @@ -722,6 +750,80 @@ files = [ gitdb = ">=4.0.1,<5" typing-extensions = {version = ">=3.7.4.3", markers = "python_version < \"3.8\""} +[[package]] +name = "googleapis-common-protos" +version = "1.59.1" +description = "Common protobufs used in Google APIs" +optional = false +python-versions = ">=3.7" +files = [ + {file = "googleapis-common-protos-1.59.1.tar.gz", hash = "sha256:b35d530fe825fb4227857bc47ad84c33c809ac96f312e13182bdeaa2abe1178a"}, + {file = "googleapis_common_protos-1.59.1-py2.py3-none-any.whl", hash = "sha256:0cbedb6fb68f1c07e18eb4c48256320777707e7d0c55063ae56c15db3224a61e"}, +] + +[package.dependencies] +protobuf = ">=3.19.5,<3.20.0 || >3.20.0,<3.20.1 || >3.20.1,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<5.0.0.dev0" + +[package.extras] +grpc = ["grpcio (>=1.44.0,<2.0.0.dev0)"] + +[[package]] +name = "grpcio" +version = "1.56.0" +description = "HTTP/2-based RPC framework" +optional = false +python-versions = ">=3.7" +files = [ + {file = "grpcio-1.56.0-cp310-cp310-linux_armv7l.whl", hash = "sha256:fb34ace11419f1ae321c36ccaa18d81cd3f20728cd191250be42949d6845bb2d"}, + {file = "grpcio-1.56.0-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:008767c0aed4899e657b50f2e0beacbabccab51359eba547f860e7c55f2be6ba"}, + {file = "grpcio-1.56.0-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:17f47aeb9be0da5337f9ff33ebb8795899021e6c0741ee68bd69774a7804ca86"}, + 
{file = "grpcio-1.56.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:43c50d810cc26349b093bf2cfe86756ab3e9aba3e7e681d360930c1268e1399a"}, + {file = "grpcio-1.56.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:187b8f71bad7d41eea15e0c9812aaa2b87adfb343895fffb704fb040ca731863"}, + {file = "grpcio-1.56.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:881575f240eb5db72ddca4dc5602898c29bc082e0d94599bf20588fb7d1ee6a0"}, + {file = "grpcio-1.56.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:c243b158dd7585021d16c50498c4b2ec0a64a6119967440c5ff2d8c89e72330e"}, + {file = "grpcio-1.56.0-cp310-cp310-win32.whl", hash = "sha256:8b3b2c7b5feef90bc9a5fa1c7f97637e55ec3e76460c6d16c3013952ee479cd9"}, + {file = "grpcio-1.56.0-cp310-cp310-win_amd64.whl", hash = "sha256:03a80451530fd3b8b155e0c4480434f6be669daf7ecba56f73ef98f94222ee01"}, + {file = "grpcio-1.56.0-cp311-cp311-linux_armv7l.whl", hash = "sha256:64bd3abcf9fb4a9fa4ede8d0d34686314a7075f62a1502217b227991d9ca4245"}, + {file = "grpcio-1.56.0-cp311-cp311-macosx_10_10_universal2.whl", hash = "sha256:fdc3a895791af4addbb826808d4c9c35917c59bb5c430d729f44224e51c92d61"}, + {file = "grpcio-1.56.0-cp311-cp311-manylinux_2_17_aarch64.whl", hash = "sha256:4f84a6fd4482e5fe73b297d4874b62a535bc75dc6aec8e9fe0dc88106cd40397"}, + {file = "grpcio-1.56.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:14e70b4dda3183abea94c72d41d5930c333b21f8561c1904a372d80370592ef3"}, + {file = "grpcio-1.56.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6b5ce42a5ebe3e04796246ba50357f1813c44a6efe17a37f8dc7a5c470377312"}, + {file = "grpcio-1.56.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:8219f17baf069fe8e42bd8ca0b312b875595e43a70cabf397be4fda488e2f27d"}, + {file = "grpcio-1.56.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:defdd14b518e6e468466f799aaa69db0355bca8d3a5ea75fb912d28ba6f8af31"}, + {file = 
"grpcio-1.56.0-cp311-cp311-win32.whl", hash = "sha256:50f4daa698835accbbcc60e61e0bc29636c0156ddcafb3891c987e533a0031ba"}, + {file = "grpcio-1.56.0-cp311-cp311-win_amd64.whl", hash = "sha256:59c4e606993a47146fbeaf304b9e78c447f5b9ee5641cae013028c4cca784617"}, + {file = "grpcio-1.56.0-cp37-cp37m-linux_armv7l.whl", hash = "sha256:b1f4b6f25a87d80b28dd6d02e87d63fe1577fe6d04a60a17454e3f8077a38279"}, + {file = "grpcio-1.56.0-cp37-cp37m-macosx_10_10_universal2.whl", hash = "sha256:c2148170e01d464d41011a878088444c13413264418b557f0bdcd1bf1b674a0e"}, + {file = "grpcio-1.56.0-cp37-cp37m-manylinux_2_17_aarch64.whl", hash = "sha256:0409de787ebbf08c9d2bca2bcc7762c1efe72eada164af78b50567a8dfc7253c"}, + {file = "grpcio-1.56.0-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:66f0369d27f4c105cd21059d635860bb2ea81bd593061c45fb64875103f40e4a"}, + {file = "grpcio-1.56.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:38fdf5bd0a1c754ce6bf9311a3c2c7ebe56e88b8763593316b69e0e9a56af1de"}, + {file = "grpcio-1.56.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:79d4c5911d12a7aa671e5eb40cbb50a830396525014d2d6f254ea2ba180ce637"}, + {file = "grpcio-1.56.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:5d2fc471668a7222e213f86ef76933b18cdda6a51ea1322034478df8c6519959"}, + {file = "grpcio-1.56.0-cp37-cp37m-win_amd64.whl", hash = "sha256:991224fd485e088d3cb5e34366053691a4848a6b7112b8f5625a411305c26691"}, + {file = "grpcio-1.56.0-cp38-cp38-linux_armv7l.whl", hash = "sha256:c6f36621aabecbaff3e70c4d1d924c76c8e6a7ffec60c331893640a4af0a8037"}, + {file = "grpcio-1.56.0-cp38-cp38-macosx_10_10_universal2.whl", hash = "sha256:1eadd6de258901929223f422ffed7f8b310c0323324caf59227f9899ea1b1674"}, + {file = "grpcio-1.56.0-cp38-cp38-manylinux_2_17_aarch64.whl", hash = "sha256:72836b5a1d4f508ffbcfe35033d027859cc737972f9dddbe33fb75d687421e2e"}, + {file = "grpcio-1.56.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:f92a99ab0c7772fb6859bf2e4f44ad30088d18f7c67b83205297bfb229e0d2cf"}, + {file = "grpcio-1.56.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa08affbf672d051cd3da62303901aeb7042a2c188c03b2c2a2d346fc5e81c14"}, + {file = "grpcio-1.56.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:e2db108b4c8e29c145e95b0226973a66d73ae3e3e7fae00329294af4e27f1c42"}, + {file = "grpcio-1.56.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8674fdbd28266d8efbcddacf4ec3643f76fe6376f73283fd63a8374c14b0ef7c"}, + {file = "grpcio-1.56.0-cp38-cp38-win32.whl", hash = "sha256:bd55f743e654fb050c665968d7ec2c33f03578a4bbb163cfce38024775ff54cc"}, + {file = "grpcio-1.56.0-cp38-cp38-win_amd64.whl", hash = "sha256:c63bc5ac6c7e646c296fed9139097ae0f0e63f36f0864d7ce431cce61fe0118a"}, + {file = "grpcio-1.56.0-cp39-cp39-linux_armv7l.whl", hash = "sha256:c0bc9dda550785d23f4f025be614b7faa8d0293e10811f0f8536cf50435b7a30"}, + {file = "grpcio-1.56.0-cp39-cp39-macosx_10_10_universal2.whl", hash = "sha256:d596408bab632ec7b947761e83ce6b3e7632e26b76d64c239ba66b554b7ee286"}, + {file = "grpcio-1.56.0-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:76b6e6e1ee9bda32e6e933efd61c512e9a9f377d7c580977f090d1a9c78cca44"}, + {file = "grpcio-1.56.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7beb84ebd0a3f732625124b73969d12b7350c5d9d64ddf81ae739bbc63d5b1ed"}, + {file = "grpcio-1.56.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:83ec714bbbe9b9502177c842417fde39f7a267031e01fa3cd83f1ca49688f537"}, + {file = "grpcio-1.56.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:4feee75565d1b5ab09cb3a5da672b84ca7f6dd80ee07a50f5537207a9af543a4"}, + {file = "grpcio-1.56.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:b4638a796778329cc8e142e4f57c705adb286b3ba64e00b0fa91eeb919611be8"}, + {file = "grpcio-1.56.0-cp39-cp39-win32.whl", hash = "sha256:437af5a7673bca89c4bc0a993382200592d104dd7bf55eddcd141cef91f40bab"}, + {file = 
"grpcio-1.56.0-cp39-cp39-win_amd64.whl", hash = "sha256:4241a1c2c76e748023c834995cd916570e7180ee478969c2d79a60ce007bc837"}, + {file = "grpcio-1.56.0.tar.gz", hash = "sha256:4c08ee21b3d10315b8dc26f6c13917b20ed574cdbed2d2d80c53d5508fdcc0f2"}, +] + +[package.extras] +protobuf = ["grpcio-tools (>=1.56.0)"] + [[package]] name = "h11" version = "0.14.0" @@ -895,13 +997,13 @@ files = [ [[package]] name = "importlib-metadata" -version = "6.7.0" +version = "6.0.1" description = "Read metadata from Python packages" optional = false python-versions = ">=3.7" files = [ - {file = "importlib_metadata-6.7.0-py3-none-any.whl", hash = "sha256:cb52082e659e97afc5dac71e79de97d8681de3aa07ff18578330904a9d18e5b5"}, - {file = "importlib_metadata-6.7.0.tar.gz", hash = "sha256:1aaf550d4f73e5d6783e7acb77aec43d49da8017410afae93822cc9cca98c4d4"}, + {file = "importlib_metadata-6.0.1-py3-none-any.whl", hash = "sha256:1543daade821c89b1c4a55986c326f36e54f2e6ca3bad96be4563d0acb74dcd4"}, + {file = "importlib_metadata-6.0.1.tar.gz", hash = "sha256:950127d57e35a806d520817d3e92eec3f19fdae9f0cd99da77a407c5aabefba3"}, ] [package.dependencies] @@ -911,7 +1013,7 @@ zipp = ">=0.5" [package.extras] docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] perf = ["ipython"] -testing = ["flufl.flake8", "importlib-resources (>=1.3)", "packaging", "pyfakefs", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-mypy (>=0.9.1)", "pytest-perf (>=0.9.2)", "pytest-ruff"] +testing = ["flake8 (<5)", "flufl.flake8", "importlib-resources (>=1.3)", "packaging", "pyfakefs", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)", "pytest-perf (>=0.9.2)"] [[package]] name = "importlib-resources" @@ -1031,7 +1133,6 @@ optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, 
!=3.4.*, !=3.5.*, !=3.6.*" files = [ {file = "jsonpatch-1.33-py2.py3-none-any.whl", hash = "sha256:0ae28c0cd062bbd8b8ecc26d7d164fbbea9652a1a3693f3b956c1eae5145dade"}, - {file = "jsonpatch-1.33.tar.gz", hash = "sha256:9fcd4009c41e6d12348b4a0ff2563ba56a2923a7dfee731d004e212e1ee5030c"}, ] [package.dependencies] @@ -1064,7 +1165,6 @@ optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*, !=3.6.*" files = [ {file = "jsonpointer-2.4-py2.py3-none-any.whl", hash = "sha256:15d51bba20eea3165644553647711d150376234112651b4f1811022aecad7d7a"}, - {file = "jsonpointer-2.4.tar.gz", hash = "sha256:585cee82b70211fa9e6043b7bb89db6e1aa49524340dde8ad6b63206ea689d88"}, ] [[package]] @@ -1609,6 +1709,102 @@ doc = ["nb2plots (>=0.6)", "numpydoc (>=1.1)", "pillow (>=8.2)", "pydata-sphinx- extra = ["lxml (>=4.5)", "pydot (>=1.4.1)", "pygraphviz (>=1.7)"] test = ["codecov (>=2.1)", "pytest (>=6.2)", "pytest-cov (>=2.12)"] +[[package]] +name = "opentelemetry-api" +version = "1.18.0" +description = "OpenTelemetry Python API" +optional = false +python-versions = ">=3.7" +files = [ + {file = "opentelemetry_api-1.18.0-py3-none-any.whl", hash = "sha256:d05bcc94ec239fd76fd90d784c5e3ad081a8a1ac2ffc8a2c83a49ace052d1492"}, + {file = "opentelemetry_api-1.18.0.tar.gz", hash = "sha256:2bbf29739fcef268c419e3bf1735566c2e7f81026c14bcc78b62a0b97f8ecf2f"}, +] + +[package.dependencies] +deprecated = ">=1.2.6" +importlib-metadata = ">=6.0.0,<6.1.0" +setuptools = ">=16.0" + +[[package]] +name = "opentelemetry-exporter-otlp-proto-common" +version = "1.18.0" +description = "OpenTelemetry Protobuf encoding" +optional = false +python-versions = ">=3.7" +files = [ + {file = "opentelemetry_exporter_otlp_proto_common-1.18.0-py3-none-any.whl", hash = "sha256:276073ccc8c6e6570fe05ca8ca0de77d662bc89bc614ec8bfbc855112f7e25e3"}, + {file = "opentelemetry_exporter_otlp_proto_common-1.18.0.tar.gz", hash = "sha256:4d9883d6929aabe75e485950bbe8b149a14d95e50b1570426832daa6913b0871"}, +] + 
+[package.dependencies] +opentelemetry-proto = "1.18.0" + +[[package]] +name = "opentelemetry-exporter-otlp-proto-grpc" +version = "1.18.0" +description = "OpenTelemetry Collector Protobuf over gRPC Exporter" +optional = false +python-versions = ">=3.7" +files = [ + {file = "opentelemetry_exporter_otlp_proto_grpc-1.18.0-py3-none-any.whl", hash = "sha256:c773bc9df2c9d6464f0d5936963399b2fc440f0616c1277f29512d540ad7e0a2"}, + {file = "opentelemetry_exporter_otlp_proto_grpc-1.18.0.tar.gz", hash = "sha256:8eddfde4267da876871e62f1b58369986bdb7e47e43032c498f1ea807d7191c4"}, +] + +[package.dependencies] +backoff = {version = ">=1.10.0,<3.0.0", markers = "python_version >= \"3.7\""} +deprecated = ">=1.2.6" +googleapis-common-protos = ">=1.52,<2.0" +grpcio = ">=1.0.0,<2.0.0" +opentelemetry-api = ">=1.15,<2.0" +opentelemetry-exporter-otlp-proto-common = "1.18.0" +opentelemetry-proto = "1.18.0" +opentelemetry-sdk = ">=1.18.0,<1.19.0" + +[package.extras] +test = ["pytest-grpc"] + +[[package]] +name = "opentelemetry-proto" +version = "1.18.0" +description = "OpenTelemetry Python Proto" +optional = false +python-versions = ">=3.7" +files = [ + {file = "opentelemetry_proto-1.18.0-py3-none-any.whl", hash = "sha256:34d1c49283f0246a58761d9322d5a79702a09afda0bb181bb6378ed26862e446"}, + {file = "opentelemetry_proto-1.18.0.tar.gz", hash = "sha256:4f38d01049c3926b9fd09833574bfb5e172d84c8ca85e2ab7f4b5a198d75aeef"}, +] + +[package.dependencies] +protobuf = ">=3.19,<5.0" + +[[package]] +name = "opentelemetry-sdk" +version = "1.18.0" +description = "OpenTelemetry Python SDK" +optional = false +python-versions = ">=3.7" +files = [ + {file = "opentelemetry_sdk-1.18.0-py3-none-any.whl", hash = "sha256:a097cc1e0db6ff33b4d250a9350dc17975d24a22aa667fca2866e60c51306723"}, + {file = "opentelemetry_sdk-1.18.0.tar.gz", hash = "sha256:cd3230930a2ab288b1df149d261e9cd2bd48dee54ad18465a777831cb6779e90"}, +] + +[package.dependencies] +opentelemetry-api = "1.18.0" +opentelemetry-semantic-conventions = 
"0.39b0" +setuptools = ">=16.0" +typing-extensions = ">=3.7.4" + +[[package]] +name = "opentelemetry-semantic-conventions" +version = "0.39b0" +description = "OpenTelemetry Semantic Conventions" +optional = false +python-versions = ">=3.7" +files = [ + {file = "opentelemetry_semantic_conventions-0.39b0-py3-none-any.whl", hash = "sha256:0dd7a9dc0dfde2335f643705bba8f7c44182c797bc208b7601f0b8e8211cfd5c"}, + {file = "opentelemetry_semantic_conventions-0.39b0.tar.gz", hash = "sha256:06a9f198574e0dab6ebc072b59d89092cf9f115638a8a02157586769b6b7a69a"}, +] + [[package]] name = "packaging" version = "23.1" @@ -1628,7 +1824,7 @@ optional = false python-versions = ">=3.7" files = [ {file = "pathspec-0.11.1-py3-none-any.whl", hash = "sha256:d8af70af76652554bd134c22b3e8a1cc46ed7d91edcdd721ef1a0c51a84a5293"}, - {file = "pathspec-0.11.1.tar.gz", hash = "sha256:2798dERA001fa92780e33acca925945e9a19a133b715067cf165b8866c15a31687"}, + {file = "pathspec-0.11.1.tar.gz", hash = "sha256:2798de800fa92780e33acca925945e9a19a133b715067cf165b8866c15a31687"}, ] [[package]] @@ -1704,6 +1900,28 @@ importlib-metadata = {version = ">=0.12", markers = "python_version < \"3.8\""} dev = ["pre-commit", "tox"] testing = ["pytest", "pytest-benchmark"] +[[package]] +name = "protobuf" +version = "4.23.3" +description = "" +optional = false +python-versions = ">=3.7" +files = [ + {file = "protobuf-4.23.3-cp310-abi3-win32.whl", hash = "sha256:514b6bbd54a41ca50c86dd5ad6488afe9505901b3557c5e0f7823a0cf67106fb"}, + {file = "protobuf-4.23.3-cp310-abi3-win_amd64.whl", hash = "sha256:cc14358a8742c4e06b1bfe4be1afbdf5c9f6bd094dff3e14edb78a1513893ff5"}, + {file = "protobuf-4.23.3-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:2991f5e7690dab569f8f81702e6700e7364cc3b5e572725098215d3da5ccc6ac"}, + {file = "protobuf-4.23.3-cp37-abi3-manylinux2014_aarch64.whl", hash = "sha256:08fe19d267608d438aa37019236db02b306e33f6b9902c3163838b8e75970223"}, + {file = "protobuf-4.23.3-cp37-abi3-manylinux2014_x86_64.whl", hash = 
"sha256:3b01a5274ac920feb75d0b372d901524f7e3ad39c63b1a2d55043f3887afe0c1"}, + {file = "protobuf-4.23.3-cp37-cp37m-win32.whl", hash = "sha256:aca6e86a08c5c5962f55eac9b5bd6fce6ed98645d77e8bfc2b952ecd4a8e4f6a"}, + {file = "protobuf-4.23.3-cp37-cp37m-win_amd64.whl", hash = "sha256:0149053336a466e3e0b040e54d0b615fc71de86da66791c592cc3c8d18150bf8"}, + {file = "protobuf-4.23.3-cp38-cp38-win32.whl", hash = "sha256:84ea0bd90c2fdd70ddd9f3d3fc0197cc24ecec1345856c2b5ba70e4d99815359"}, + {file = "protobuf-4.23.3-cp38-cp38-win_amd64.whl", hash = "sha256:3bcbeb2bf4bb61fe960dd6e005801a23a43578200ea8ceb726d1f6bd0e562ba1"}, + {file = "protobuf-4.23.3-cp39-cp39-win32.whl", hash = "sha256:5cb9e41188737f321f4fce9a4337bf40a5414b8d03227e1d9fbc59bc3a216e35"}, + {file = "protobuf-4.23.3-cp39-cp39-win_amd64.whl", hash = "sha256:29660574cd769f2324a57fb78127cda59327eb6664381ecfe1c69731b83e8288"}, + {file = "protobuf-4.23.3-py3-none-any.whl", hash = "sha256:447b9786ac8e50ae72cae7a2eec5c5df6a9dbf9aa6f908f1b8bda6032644ea62"}, + {file = "protobuf-4.23.3.tar.gz", hash = "sha256:7a92beb30600332a52cdadbedb40d33fd7c8a0d7f549c440347bc606fb3fe34b"}, +] + [[package]] name = "publication" version = "0.0.3" @@ -2383,6 +2601,22 @@ starlette = ["starlette (>=0.19.1)"] starlite = ["starlite (>=1.48)"] tornado = ["tornado (>=5)"] +[[package]] +name = "setuptools" +version = "68.0.0" +description = "Easily download, build, install, upgrade, and uninstall Python packages" +optional = false +python-versions = ">=3.7" +files = [ + {file = "setuptools-68.0.0-py3-none-any.whl", hash = "sha256:11e52c67415a381d10d6b462ced9cfb97066179f0e871399e006c4ab101fc85f"}, + {file = "setuptools-68.0.0.tar.gz", hash = "sha256:baf1fdb41c6da4cd2eae722e135500da913332ab3f2f5c7d33af9b492acb5235"}, +] + +[package.extras] +docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-hoverxref (<2)", "sphinx-inline-tabs", 
"sphinx-lint", "sphinx-notfound-page (==0.8.3)", "sphinx-reredirects", "sphinxcontrib-towncrier"] +testing = ["build[virtualenv]", "filelock (>=3.4.0)", "flake8-2020", "ini2toml[lite] (>=0.9)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "pip (>=19.1)", "pip-run (>=8.8)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-mypy (>=0.9.1)", "pytest-perf", "pytest-ruff", "pytest-timeout", "pytest-xdist", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"] +testing-integration = ["build[virtualenv]", "filelock (>=3.4.0)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "pytest", "pytest-enabler", "pytest-xdist", "tomli", "virtualenv (>=13.0.0)", "wheel"] + [[package]] name = "six" version = "1.16.0" @@ -2624,7 +2858,7 @@ watchmedo = ["PyYAML (>=3.10)"] name = "wrapt" version = "1.15.0" description = "Module for decorators, wrappers and monkey patching." -optional = true +optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7" files = [ {file = "wrapt-1.15.0-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:ca1cccf838cd28d5a0883b342474c630ac48cac5df0ee6eacc9c7290f76b11c1"}, @@ -2745,4 +2979,4 @@ validation = ["fastjsonschema"] [metadata] lock-version = "2.0" python-versions = "^3.7.4" -content-hash = "27fb7818ae11e6ca12cddc942f555a8d28517c9efb776f188a22a365f1bd36ed" +content-hash = "32991d0ef11644aa82a70cd269c962dd7ea813377748e046f1e3ad1d8551c2eb" diff --git a/pyproject.toml b/pyproject.toml index aa5c3db2abd..cebb3eb934d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -33,6 +33,8 @@ fastjsonschema = { version = "^2.14.5", optional = true } pydantic = { version = "^1.8.2", optional = true } boto3 = { version = "^1.20.32", optional = true } typing-extensions = "^4.6.2" +opentelemetry-sdk = "^1.18.0" +opentelemetry-exporter-otlp-proto-grpc = "^1.18.0" [tool.poetry.dev-dependencies] coverage = {extras = ["toml"], version = "^7.2"} From 
c203c3d553a07b065f3edd6c82349f7d1f9ff187 Mon Sep 17 00:00:00 2001 From: Roger Zhang Date: Wed, 28 Jun 2023 15:27:01 -0700 Subject: [PATCH 13/32] add test to metrics providers --- .../metrics/provider/base.py | 2 +- tests/functional/test_metrics.py | 137 ++++++++++++++++++ 2 files changed, 138 insertions(+), 1 deletion(-) diff --git a/aws_lambda_powertools/metrics/provider/base.py b/aws_lambda_powertools/metrics/provider/base.py index 2afb2d68dfd..f482e3e3f01 100644 --- a/aws_lambda_powertools/metrics/provider/base.py +++ b/aws_lambda_powertools/metrics/provider/base.py @@ -123,6 +123,6 @@ def _add_cold_start_metric(self, context: Any) -> None: return logger.debug("Adding cold start metric and function_name dimension") - self.add_metric(name="ColdStart", value=1) + self.add_metric(name="ColdStart", value=1, tag=[{"function_name": context.function_name}]) is_cold_start = False diff --git a/tests/functional/test_metrics.py b/tests/functional/test_metrics.py index d7063f88744..d3298f2ed69 100644 --- a/tests/functional/test_metrics.py +++ b/tests/functional/test_metrics.py @@ -20,6 +20,7 @@ MetricManager, reset_cold_start_flag, ) +from aws_lambda_powertools.metrics.provider import CloudWatchEMF, MetricsBase, MetricsProviderBase @pytest.fixture(scope="function", autouse=True) @@ -1171,3 +1172,139 @@ def lambda_handler(evt, ctx): output = capture_metrics_output_multiple_emf_objects(capsys) assert len(output) == 2 + + +@pytest.fixture +def metrics_provider() -> MetricsProviderBase: + class MetricsProvider(MetricsProviderBase): + def __init__(self): + self.metric_store: List = [] + self.result: str + super().__init__() + + def add_metric(self, name: str, value: float, tag: List = None, *args, **kwargs): + self.metric_store.append({"name": name, "value": value, "tag": tag}) + + def serialize(self, raise_on_empty_metrics: bool = False, *args, **kwargs): + if raise_on_empty_metrics and len(self.metric_store) == 0: + raise SchemaValidationError("Must contain at least one 
metric.") + + self.result = json.dumps(self.metric_store) + + def flush(self, *args, **kwargs): + print(self.result) + + def clear(self): + self.result = "" + self.metric_store = [] + + return MetricsProvider + + +@pytest.fixture +def metrics_class() -> MetricsBase: + class MetricsClass(MetricsBase): + def __init__(self, provider): + self.provider = provider + super().__init__() + + def add_metric(self, name: str, value: float, tag: List = None, *args, **kwargs): + self.provider.add_metric(name=name, value=value, tag=tag) + + def flush_metrics(self, raise_on_empty_metrics: bool = False) -> None: + self.provider.serialize(raise_on_empty_metrics=raise_on_empty_metrics) + self.provider.flush() + self.provider.clear() + + return MetricsClass + + +def test_cloudwatch_emf(namespace): + assert CloudWatchEMF == Metrics + + +def test_metrics_provider_basic(capsys, metrics_provider, metric): + provider = metrics_provider() + provider.add_metric(**metric) + provider.serialize() + provider.flush() + output = capture_metrics_output(capsys) + assert output[0]["name"] == metric["name"] + assert output[0]["value"] == metric["value"] + + +def test_metrics_provider_class_basic(capsys, metrics_provider, metrics_class, metric): + metrics = metrics_class(provider=metrics_provider()) + metrics.add_metric(**metric) + metrics.flush_metrics() + output = capture_metrics_output(capsys) + assert output[0]["name"] == metric["name"] + assert output[0]["value"] == metric["value"] + + +def test_metrics_provider_class_decorate(metrics_class, metrics_provider): + # GIVEN Metrics is initialized + my_metrics = metrics_class(provider=metrics_provider()) + + # WHEN log_metrics is used to serialize metrics + @my_metrics.log_metrics + def lambda_handler(evt, context): + return True + + # THEN log_metrics should invoke the function it decorates + # and return no error if we have a namespace and dimension + assert lambda_handler({}, {}) is True + + +def test_metrics_provider_class_coldstart(capsys, 
metrics_provider, metrics_class): + my_metrics = metrics_class(provider=metrics_provider()) + + # WHEN log_metrics is used with capture_cold_start_metric + @my_metrics.log_metrics(capture_cold_start_metric=True) + def lambda_handler(evt, context): + pass + + LambdaContext = namedtuple("LambdaContext", "function_name") + lambda_handler({}, LambdaContext("example_fn")) + + output = capture_metrics_output(capsys) + + # THEN ColdStart metric and function_name and service dimension should be logged + assert output[0]["name"] == "ColdStart" + assert output[0]["value"] == 1 + assert output[0]["tag"] == [{"function_name": "example_fn"}] + + +def test_metrics_provider_class_no_coldstart(capsys, metrics_provider, metrics_class): + my_metrics = metrics_class(provider=metrics_provider()) + + # WHEN log_metrics is used with capture_cold_start_metric + @my_metrics.log_metrics(capture_cold_start_metric=True) + def lambda_handler(evt, context): + pass + + LambdaContext = namedtuple("LambdaContext", "function_name") + lambda_handler({}, LambdaContext("example_fn")) + _ = capture_metrics_output(capsys) + # drop first one + + lambda_handler({}, LambdaContext("example_fn")) + output = capture_metrics_output(capsys) + + # no coldstart is here + assert "ColdStart" not in json.dumps(output) + + +def test_metric_provider_raise_on_empty_metrics(metrics_provider, metrics_class): + # GIVEN Metrics is initialized + my_metrics = metrics_class(provider=metrics_provider()) + + # WHEN log_metrics is used with raise_on_empty_metrics param and has no metrics + @my_metrics.log_metrics(raise_on_empty_metrics=True) + def lambda_handler(evt, context): + pass + + # THEN the raised exception should be SchemaValidationError + # and specifically about the lack of Metrics + with pytest.raises(SchemaValidationError, match="Must contain at least one metric."): + lambda_handler({}, {}) From 9cde8e3c9cdc3603ad05110f204e4e92f1fade3f Mon Sep 17 00:00:00 2001 From: Leandro Damascena Date: Wed, 12 Jul 2023 00:26:11 
+0100 Subject: [PATCH 14/32] docstring + code coverage --- .../metrics/provider/base.py | 121 ++++++++++++++++-- .../provider/opentelemetry_provider_draft.py | 4 +- aws_lambda_powertools/shared/user_agent.py | 6 +- 3 files changed, 115 insertions(+), 16 deletions(-) diff --git a/aws_lambda_powertools/metrics/provider/base.py b/aws_lambda_powertools/metrics/provider/base.py index f482e3e3f01..e65477f8f52 100644 --- a/aws_lambda_powertools/metrics/provider/base.py +++ b/aws_lambda_powertools/metrics/provider/base.py @@ -9,42 +9,141 @@ class MetricsProviderBase(ABC): - """Class for metric provider template + """ + Class for metric provider template. - Use this template to create your own metric provider. + This class serves as a template for creating your own metric provider. Inherit from this class + and implement the required methods to define your specific metric provider. + Usage: + 1. Inherit from this class. + 2. Implement the required methods specific to your metric provider. + 3. Customize the behavior and functionality of the metric provider in your subclass. """ - # General add metric function. Should return combined metrics Dict @abstractmethod def add_metric(self, *args, **kwargs): - pass + """ + Abstract method for adding a metric. + + This method must be implemented in subclasses to add a metric and return a combined metrics dictionary. + + Parameters + ---------- + *args: + Positional arguments. + *kwargs: + Keyword arguments. + + Returns + ---------- + Dict + A combined metrics dictionary. + + Raises + ---------- + NotImplementedError + This method must be implemented in subclasses. + """ + raise NotImplementedError - # serialize and return dict for flushing @abstractmethod def serialize(self, *args, **kwargs): - pass + """ + Abstract method for serialize a metric. + + This method must be implemented in subclasses to add a metric and return a combined metrics dictionary. + + Parameters + ---------- + *args: + Positional arguments. 
+ *kwargs: + Keyword arguments. + + Returns + ---------- + Dict + Serialized metrics + + Raises + ---------- + NotImplementedError + This method must be implemented in subclasses. + """ + raise NotImplementedError # flush serialized data to output, or send to API directly @abstractmethod def flush(self, *args, **kwargs): - pass + """ + Abstract method for flushing a metric. + + This method must be implemented in subclasses to add a metric and return a combined metrics dictionary. + + Parameters + ---------- + *args: + Positional arguments. + *kwargs: + Keyword arguments. + + Raises + ---------- + NotImplementedError + This method must be implemented in subclasses. + """ + raise NotImplementedError class MetricsBase(ABC): - """Class for metric template + """ + Class for metric template. - Use this template to create your own metric class. + This class serves as a template for creating your own metric class. Inherit from this class + and implement the necessary methods to define your specific metric. + NOTE: need to improve this docstring """ @abstractmethod def add_metric(self, *args, **kwargs): - pass + """ + Abstract method for adding a metric. + + This method must be implemented in subclasses to add a metric and return a combined metrics dictionary. + + Parameters + ---------- + *args: + Positional arguments. + *kwargs: + Keyword arguments. + + Returns + ---------- + Dict + A combined metrics dictionary. + + Raises + ---------- + NotImplementedError + This method must be implemented in subclasses. + """ + raise NotImplementedError @abstractmethod def flush_metrics(self, raise_on_empty_metrics: bool = False) -> None: - pass + """Manually flushes the metrics. This is normally not necessary, + unless you're running on other runtimes besides Lambda, where the @log_metrics + decorator already handles things for you. 
+ + Parameters + ---------- + raise_on_empty_metrics : bool, optional + raise exception if no metrics are emitted, by default False + """ + raise NotImplementedError def log_metrics( self, diff --git a/aws_lambda_powertools/metrics/provider/opentelemetry_provider_draft.py b/aws_lambda_powertools/metrics/provider/opentelemetry_provider_draft.py index 75cf52d599b..0882bb3ea45 100644 --- a/aws_lambda_powertools/metrics/provider/opentelemetry_provider_draft.py +++ b/aws_lambda_powertools/metrics/provider/opentelemetry_provider_draft.py @@ -71,8 +71,8 @@ class OTLPMetrics(MetricsBase): Example ------- - dd_provider = DataDogProvider(namespace="default") - metrics = DataDogMetrics(provider=dd_provider) + dd_provider = OTLPProvider(namespace="default") + metrics = OTLPMetrics(provider=dd_provider) @metrics.log_metrics(capture_cold_start_metric: bool = True, raise_on_empty_metrics: bool = False) def lambda_handler(event, context) diff --git a/aws_lambda_powertools/shared/user_agent.py b/aws_lambda_powertools/shared/user_agent.py index 098be7a503a..c682c24b34f 100644 --- a/aws_lambda_powertools/shared/user_agent.py +++ b/aws_lambda_powertools/shared/user_agent.py @@ -112,7 +112,7 @@ def register_feature_to_session(session, feature): def register_feature_to_botocore_session(botocore_session, feature): """ Register the given feature string to the event system of the provided botocore session - + Please notice this function is for patching botocore session and is different from previous one which is for patching boto3 session @@ -127,7 +127,7 @@ def register_feature_to_botocore_session(botocore_session, feature): ------ AttributeError If the provided session does not have an event system. 
- + Examples -------- **register data-masking user-agent to botocore session** @@ -139,7 +139,7 @@ def register_feature_to_botocore_session(botocore_session, feature): >>> session = botocore.session.Session() >>> register_feature_to_botocore_session(botocore_session=session, feature="data-masking") >>> key_provider = StrictAwsKmsMasterKeyProvider(key_ids=self.keys, botocore_session=session) - + """ try: botocore_session.register(TARGET_SDK_EVENT, _create_feature_function(feature)) From 5bdb2c2601c94a4e9342126a2e97c186d32ee52e Mon Sep 17 00:00:00 2001 From: Leandro Damascena Date: Wed, 12 Jul 2023 00:39:07 +0100 Subject: [PATCH 15/32] python annotations + imports --- aws_lambda_powertools/metrics/__init__.py | 8 ++-- aws_lambda_powertools/metrics/base.py | 46 ++++++++++--------- aws_lambda_powertools/metrics/metric.py | 2 +- aws_lambda_powertools/metrics/metrics.py | 4 +- .../metrics/provider/base.py | 6 ++- .../metrics/provider/cloudwatch_emf.py | 8 ++-- 6 files changed, 40 insertions(+), 34 deletions(-) diff --git a/aws_lambda_powertools/metrics/__init__.py b/aws_lambda_powertools/metrics/__init__.py index 8227ca410d0..c9218af1ab5 100644 --- a/aws_lambda_powertools/metrics/__init__.py +++ b/aws_lambda_powertools/metrics/__init__.py @@ -1,14 +1,14 @@ """CloudWatch Embedded Metric Format utility """ -from .base import MetricResolution, MetricUnit -from .exceptions import ( +from aws_lambda_powertools.metrics.base import MetricResolution, MetricUnit +from aws_lambda_powertools.metrics.exceptions import ( MetricResolutionError, MetricUnitError, MetricValueError, SchemaValidationError, ) -from .metric import single_metric -from .provider.cloudwatch_emf import CloudWatchEMF, EphemeralMetrics, Metrics +from aws_lambda_powertools.metrics.metric import single_metric +from aws_lambda_powertools.metrics.provider.cloudwatch_emf import CloudWatchEMF, EphemeralMetrics, Metrics __all__ = [ "Metrics", diff --git a/aws_lambda_powertools/metrics/base.py 
b/aws_lambda_powertools/metrics/base.py index 6a5e7282392..27d76a46939 100644 --- a/aws_lambda_powertools/metrics/base.py +++ b/aws_lambda_powertools/metrics/base.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import datetime import functools import json @@ -10,15 +12,15 @@ from enum import Enum from typing import Any, Callable, Dict, Generator, List, Optional, Union -from ..shared import constants -from ..shared.functions import resolve_env_var_choice -from .exceptions import ( +from aws_lambda_powertools.metrics.exceptions import ( MetricResolutionError, MetricUnitError, MetricValueError, SchemaValidationError, ) -from .types import MetricNameUnitResolution +from aws_lambda_powertools.metrics.types import MetricNameUnitResolution +from aws_lambda_powertools.shared import constants +from aws_lambda_powertools.shared.functions import resolve_env_var_choice logger = logging.getLogger(__name__) @@ -94,11 +96,11 @@ class MetricManager: def __init__( self, - metric_set: Optional[Dict[str, Any]] = None, - dimension_set: Optional[Dict] = None, - namespace: Optional[str] = None, - metadata_set: Optional[Dict[str, Any]] = None, - service: Optional[str] = None, + metric_set: Dict[str, Any] | None = None, + dimension_set: Dict | None = None, + namespace: str | None = None, + metadata_set: Dict[str, Any] | None = None, + service: str | None = None, ): self.metric_set = metric_set if metric_set is not None else {} self.dimension_set = dimension_set if dimension_set is not None else {} @@ -112,9 +114,9 @@ def __init__( def add_metric( self, name: str, - unit: Union[MetricUnit, str], + unit: MetricUnit | str, value: float, - resolution: Union[MetricResolution, int] = 60, + resolution: MetricResolution | int = 60, ) -> None: """Adds given metric @@ -173,9 +175,9 @@ def add_metric( def serialize_metric_set( self, - metrics: Optional[Dict] = None, - dimensions: Optional[Dict] = None, - metadata: Optional[Dict] = None, + metrics: Dict | None = None, + dimensions: Dict | 
None = None, + metadata: Dict | None = None, ) -> Dict: """Serializes metric and dimensions set @@ -355,10 +357,10 @@ def flush_metrics(self, raise_on_empty_metrics: bool = False) -> None: def log_metrics( self, - lambda_handler: Union[Callable[[Dict, Any], Any], Optional[Callable[[Dict, Any, Optional[Dict]], Any]]] = None, + lambda_handler: Callable[[Dict, Any], Any] | Optional[Callable[[Dict, Any, Optional[Dict]], Any]] = None, capture_cold_start_metric: bool = False, raise_on_empty_metrics: bool = False, - default_dimensions: Optional[Dict[str, str]] = None, + default_dimensions: Dict[str, str] | None = None, ): """Decorator to serialize and publish metrics at the end of a function execution. @@ -537,9 +539,9 @@ class SingleMetric(MetricManager): def add_metric( self, name: str, - unit: Union[MetricUnit, str], + unit: MetricUnit | str, value: float, - resolution: Union[MetricResolution, int] = 60, + resolution: MetricResolution | int = 60, ) -> None: """Method to prevent more than one metric being created @@ -565,9 +567,9 @@ def single_metric( name: str, unit: MetricUnit, value: float, - resolution: Union[MetricResolution, int] = 60, - namespace: Optional[str] = None, - default_dimensions: Optional[Dict[str, str]] = None, + resolution: MetricResolution | int = 60, + namespace: str | None = None, + default_dimensions: Dict[str, str] | None = None, ) -> Generator[SingleMetric, None, None]: """Context manager to simplify creation of a single metric @@ -622,7 +624,7 @@ def single_metric( SchemaValidationError When metric object fails EMF schema validation """ # noqa: E501 - metric_set: Optional[Dict] = None + metric_set: Dict | None = None try: metric: SingleMetric = SingleMetric(namespace=namespace) metric.add_metric(name=name, unit=unit, value=value, resolution=resolution) diff --git a/aws_lambda_powertools/metrics/metric.py b/aws_lambda_powertools/metrics/metric.py index 5465889f1f0..e2ac49df489 100644 --- a/aws_lambda_powertools/metrics/metric.py +++ 
b/aws_lambda_powertools/metrics/metric.py @@ -1,4 +1,4 @@ # NOTE: prevents circular inheritance import -from .base import SingleMetric, single_metric +from aws_lambda_powertools.metrics.base import SingleMetric, single_metric __all__ = ["SingleMetric", "single_metric"] diff --git a/aws_lambda_powertools/metrics/metrics.py b/aws_lambda_powertools/metrics/metrics.py index 0c8ac73cdd1..859ae069103 100644 --- a/aws_lambda_powertools/metrics/metrics.py +++ b/aws_lambda_powertools/metrics/metrics.py @@ -1,4 +1,4 @@ # NOTE: keeps for compatibility -from .provider.cloudwatch_emf import EphemeralMetrics, Metrics +from aws_lambda_powertools.metrics.provider.cloudwatch_emf import CloudWatchEMF, EphemeralMetrics, Metrics -__all__ = ["Metrics", "EphemeralMetrics"] +__all__ = ["Metrics", "EphemeralMetrics", "CloudWatchEMF"] diff --git a/aws_lambda_powertools/metrics/provider/base.py b/aws_lambda_powertools/metrics/provider/base.py index e65477f8f52..b505b51e309 100644 --- a/aws_lambda_powertools/metrics/provider/base.py +++ b/aws_lambda_powertools/metrics/provider/base.py @@ -1,7 +1,9 @@ +from __future__ import annotations + import functools import logging from abc import ABC, abstractmethod -from typing import Any, Callable, Dict, Optional, Union +from typing import Any, Callable, Dict, Optional logger = logging.getLogger(__name__) @@ -147,7 +149,7 @@ def flush_metrics(self, raise_on_empty_metrics: bool = False) -> None: def log_metrics( self, - lambda_handler: Union[Callable[[Dict, Any], Any], Optional[Callable[[Dict, Any, Optional[Dict]], Any]]] = None, + lambda_handler: Callable[[Dict, Any], Any] | Optional[Callable[[Dict, Any, Optional[Dict]], Any]] = None, capture_cold_start_metric: bool = False, raise_on_empty_metrics: bool = False, ): diff --git a/aws_lambda_powertools/metrics/provider/cloudwatch_emf.py b/aws_lambda_powertools/metrics/provider/cloudwatch_emf.py index 94ba62894a0..1f5a55ff717 100644 --- a/aws_lambda_powertools/metrics/provider/cloudwatch_emf.py +++ 
b/aws_lambda_powertools/metrics/provider/cloudwatch_emf.py @@ -1,4 +1,6 @@ -from typing import Any, Dict, Optional +from __future__ import annotations + +from typing import Any, Dict from aws_lambda_powertools.metrics.base import MetricManager @@ -69,7 +71,7 @@ def lambda_handler(): _metadata: Dict[str, Any] = {} _default_dimensions: Dict[str, Any] = {} - def __init__(self, service: Optional[str] = None, namespace: Optional[str] = None): + def __init__(self, service: str | None = None, namespace: str | None = None): self.metric_set = self._metrics self.metadata_set = self._metadata self.default_dimensions = self._default_dimensions @@ -132,5 +134,5 @@ class EphemeralMetrics(MetricManager): - Create the same metrics with different dimensions more than once """ - def __init__(self, service: Optional[str] = None, namespace: Optional[str] = None): + def __init__(self, service: str | None = None, namespace: str | None = None): super().__init__(namespace=namespace, service=service) From 3673c4b79daf8ed9b63edbd62accb0680bcb7fd5 Mon Sep 17 00:00:00 2001 From: Roger Zhang Date: Thu, 13 Jul 2023 11:31:47 -0700 Subject: [PATCH 16/32] fix docstring polish datadog_provider add flush_to_log parameter remove OTEL provider draft --- .../metrics/provider/__init__.py | 9 +- .../metrics/provider/datadog_provider.py | 232 ++++++++++++++++++ .../provider/datadog_provider_draft.py | 126 ---------- .../provider/opentelemetry_provider_draft.py | 106 -------- 4 files changed, 233 insertions(+), 240 deletions(-) create mode 100644 aws_lambda_powertools/metrics/provider/datadog_provider.py delete mode 100644 aws_lambda_powertools/metrics/provider/datadog_provider_draft.py delete mode 100644 aws_lambda_powertools/metrics/provider/opentelemetry_provider_draft.py diff --git a/aws_lambda_powertools/metrics/provider/__init__.py b/aws_lambda_powertools/metrics/provider/__init__.py index e9024797e6c..a06007a7918 100644 --- a/aws_lambda_powertools/metrics/provider/__init__.py +++ 
b/aws_lambda_powertools/metrics/provider/__init__.py @@ -5,14 +5,10 @@ Metrics, ) from aws_lambda_powertools.metrics.provider.base import MetricsBase, MetricsProviderBase -from aws_lambda_powertools.metrics.provider.datadog_provider_draft import ( +from aws_lambda_powertools.metrics.provider.datadog_provider import ( DataDogMetrics, DataDogProvider, ) -from aws_lambda_powertools.metrics.provider.opentelemetry_provider_draft import ( - OTLPMetrics, - OTLPProvider, -) __all__ = [ "MetricsBase", @@ -23,7 +19,4 @@ "AmazonCloudWatchEMF", "EphemeralAmazonCloudWatchEMF", "EphemeralMetrics", - "CloudWatchEMF", - "OTLPProvider", - "OTLPMetrics", ] diff --git a/aws_lambda_powertools/metrics/provider/datadog_provider.py b/aws_lambda_powertools/metrics/provider/datadog_provider.py new file mode 100644 index 00000000000..a8300451d1e --- /dev/null +++ b/aws_lambda_powertools/metrics/provider/datadog_provider.py @@ -0,0 +1,232 @@ +from __future__ import annotations + +import json +import logging +import numbers +import os +import time +import warnings +from typing import List, Optional + +from aws_lambda_powertools.metrics.exceptions import MetricValueError, SchemaValidationError +from aws_lambda_powertools.metrics.provider import MetricsBase, MetricsProviderBase + +logger = logging.getLogger(__name__) + +# Check if using datadog layer +try: + from datadog_lambda.metric import lambda_metric # type: ignore +except ImportError: + lambda_metric = None + +DEFAULT_NAMESPACE = "default" + + +class DataDogProvider(MetricsProviderBase): + """ + Class for datadog provider. 
This Class should only be used inside DataDogMetrics + all datadog metric data will be stored as + { + "m": metric_name, + "v": value, + "e": timestamp + "t": List["tag:value","tag2:value2"] + } + see https://github.com/DataDog/datadog-lambda-python/blob/main/datadog_lambda/metric.py#L77 + + Examples + -------- + + """ + + def __init__(self, namespace: str = DEFAULT_NAMESPACE, flush_to_log: bool = False): + """ + + Parameters + ---------- + namespace: str + For datadog, namespace will be appended in front of the metrics name in metrics exported. + (namespace.metrics_name) + flush_to_log: bool + Flush datadog metrics to log (collect with log forwarder) rather than using datadog extension + """ + self.metrics: List = [] + self.namespace: str = namespace + # either is true then flush to log + self.flush_to_log = (os.environ.get("DD_FLUSH_TO_LOG", "").lower() == "true") or flush_to_log + super().__init__() + + # adding name,value,timestamp,tags + def add_metric(self, name: str, value: float, timestamp: Optional[int] = None, tags: Optional[List] = None): + """ + The add_metrics function that will be used by metrics class. 
+ + Parameters + ---------- + name: str + Name/Key for the metrics + value: float + Value for the metrics + timestamp: int + Timestamp in int for the metrics, default = time.time() + tags: List[str] + In format like List["tag:value","tag2:value2"] + + Examples + -------- + add_metric( + name='coffee_house.order_value', + value=12.45, + tags=['product:latte', 'order:online'] + ) + """ + if not isinstance(value, numbers.Real): + raise MetricValueError(f"{value} is not a valid number") + if not timestamp: + timestamp = int(time.time()) + self.metrics.append({"m": name, "v": value, "e": timestamp, "t": tags}) + + def serialize(self) -> List: + output_list: List = [] + + for single_metric in self.metrics: + if self.namespace != DEFAULT_NAMESPACE: + metric_name = f"{self.namespace}.{single_metric['m']}" + else: + metric_name = single_metric["m"] + output_list.append( + { + "m": metric_name, + "v": single_metric["v"], + "e": single_metric["e"], + "t": single_metric["t"], + }, + ) + + return output_list + + # flush serialized data to output + def flush(self, metrics: List): + """ + + Parameters + ---------- + metrics: List[Dict] + [{ + "m": metric_name, + "v": value, + "e": timestamp + "t": List["tag:value","tag2:value2"] + }] + + Raises + ------- + SchemaValidationError + When metric object fails EMF schema validation + """ + if len(metrics) == 0: + raise SchemaValidationError("Must contain at least one metric.") + # submit through datadog extension + if lambda_metric and self.flush_to_log is False: + # use lambda_metric function from datadog package, submit metrics to datadog + for metric_item in metrics: + lambda_metric( + metric_name=metric_item["m"], + value=metric_item["v"], + timestamp=metric_item["e"], + tags=metric_item["t"], + ) + else: + # dd module not found: flush to log, this format can be recognized via datadog log forwarder + # https://github.com/DataDog/datadog-lambda-python/blob/main/datadog_lambda/metric.py#L77 + for metric_item in metrics: + 
print(json.dumps(metric_item, separators=(",", ":"))) + + def clear(self): + self.metrics = [] + + +class DataDogMetrics(MetricsBase): + """ + Class for datadog metrics + + Parameters + ---------- + provider: MetricsProviderBase + The datadog provider which will be used to process metrics data + + Example + ------- + **Creates a few metrics and publish at the end of a function execution** + + from aws_lambda_powertools.metrics.provider import DataDogMetrics, DataDogProvider + + dd_provider = DataDogProvider(namespace="Serverlesspresso") + metrics = DataDogMetrics(provider=dd_provider) + + @metrics.log_metrics(capture_cold_start_metric=True, raise_on_empty_metrics=False) + def lambda_handler(event, context) + metrics.add_metric(name="item_sold",value=1,tags=['product:latte', 'order:online']) + """ + + # `log_metrics` and `_add_cold_start_metric` are directly inherited from `MetricsBase` + def __init__(self, provider: MetricsProviderBase): + self.provider = provider + super().__init__() + + # drop additional kwargs to keep same experience + def add_metric( + self, + name: str, + value: float, + timestamp: Optional[int] = None, + tags: Optional[List] = None, + *args, + **kwargs, + ): + """ + The add_metrics function that will be used by metrics class. + + Parameters + ---------- + name: str + Name/Key for the metrics + value: float + Value for the metrics + timestamp: int + Timestamp in int for the metrics, default = time.time() + tags: List[str] + In format like List["tag:value","tag2:value2"] + + Examples + -------- + add_metric( + name='coffee_house.order_value', + value=12.45, + tags=['product:latte', 'order:online'] + ) + """ + self.provider.add_metric(name=name, value=value, timestamp=timestamp, tags=tags) + + def flush_metrics(self, raise_on_empty_metrics: bool = False) -> None: + """ + Manually flushes the metrics. 
This is normally not necessary, + unless you're running on other runtimes besides Lambda, where the @log_metrics + decorator already handles things for you. + + Parameters + ---------- + raise_on_empty_metrics: bool + raise exception if no metrics are emitted, by default False + """ + metrics = self.provider.serialize() + if not metrics and not raise_on_empty_metrics: + warnings.warn( + "No application metrics to publish. The cold-start metric may be published if enabled. " + "If application metrics should never be empty, consider using 'raise_on_empty_metrics'", + stacklevel=2, + ) + else: + # will raise on empty metrics + self.provider.flush(metrics) + self.provider.clear() diff --git a/aws_lambda_powertools/metrics/provider/datadog_provider_draft.py b/aws_lambda_powertools/metrics/provider/datadog_provider_draft.py deleted file mode 100644 index 4b5511243e4..00000000000 --- a/aws_lambda_powertools/metrics/provider/datadog_provider_draft.py +++ /dev/null @@ -1,126 +0,0 @@ -from __future__ import annotations - -import json -import logging -import numbers -import time -import warnings -from typing import List, Optional - -from aws_lambda_powertools.metrics.exceptions import MetricValueError -from aws_lambda_powertools.metrics.provider import MetricsBase, MetricsProviderBase - -logger = logging.getLogger(__name__) - -# Check if using datadog layer -try: - from datadog_lambda.metric import lambda_metric # type: ignore -except ImportError: - lambda_metric = None - - -class DataDogProvider(MetricsProviderBase): - """Class for datadog provider. 
- all datadog metric data will be stored as - see https://github.com/DataDog/datadog-lambda-python/blob/main/datadog_lambda/metric.py#L77 - { - "m": metric_name, - "v": value, - "e": timestamp - "t": List["tag:value","tag2:value2"] - } - """ - - def __init__(self, namespace: str = "default"): - self.metrics: List = [] - self.namespace: str = namespace - super().__init__() - - # adding name,value,timestamp,tags - # consider directly calling lambda_metric function here - def add_metric(self, name: str, value: float, timestamp: Optional[int] = None, tags: Optional[List] = None): - if not isinstance(value, numbers.Real): - raise MetricValueError(f"{value} is not a valid number") - if not timestamp: - timestamp = time.time() - self.metrics.append({"m": name, "v": int(value), "e": timestamp, "t": tags}) - - # serialize for flushing (Do we really need this function for datadog?) - def serialize(self) -> List: - # logic here is to add dimension and metadata to each metric's tag with "key:value" format - output_list: List = [] - - for single_metric in self.metrics: - output_list.append( - { - "m": f"{self.namespace}.{single_metric['m']}", - "v": single_metric["v"], - "e": single_metric["e"], - "t": single_metric["t"], - }, - ) - - return output_list - - # flush serialized data to output - def flush(self, metrics: List): - # submit through datadog extension - if lambda_metric: - # use lambda_metric function from datadog package, submit metrics to datadog - for metric_item in metrics: - lambda_metric( - metric_name=metric_item["m"], - value=metric_item["v"], - timestamp=metric_item["e"], - tags=metric_item["t"], - ) - else: - # flush to log with datadog format - # https://github.com/DataDog/datadog-lambda-python/blob/main/datadog_lambda/metric.py#L77 - for metric_item in metrics: - print(json.dumps(metric_item, separators=(",", ":"))) - - def clear(self): - self.metrics = [] - - -class DataDogMetrics(MetricsBase): - """Class for datadog metrics standalone class. 
- - Example - ------- - dd_provider = DataDogProvider(namespace="default") - metrics = DataDogMetrics(provider=dd_provider) - - @metrics.log_metrics(capture_cold_start_metric: bool = True, raise_on_empty_metrics: bool = False) - def lambda_handler(event, context) - metrics.add_metric(name="item_sold",value=1,tags) - """ - - # `log_metrics` and `_add_cold_start_metric` are directly inherited from `MetricsBase` - def __init__(self, provider): - self.provider = provider - super().__init__() - - # drop additional kwargs to keep same experience - def add_metric( - self, - name: str, - value: float, - timestamp: Optional[int] = None, - tags: Optional[List] = None, - *args, - **kwargs, - ): - self.provider.add_metric(name=name, value=value, timestamp=timestamp, tags=tags) - - def flush_metrics(self, raise_on_empty_metrics: bool = False) -> None: - metrics = self.provider.serialize() - if not metrics and raise_on_empty_metrics: - warnings.warn( - "No application metrics to publish. The cold-start metric may be published if enabled. 
" - "If application metrics should never be empty, consider using 'raise_on_empty_metrics'", - stacklevel=2, - ) - self.provider.flush(metrics) - self.provider.clear() diff --git a/aws_lambda_powertools/metrics/provider/opentelemetry_provider_draft.py b/aws_lambda_powertools/metrics/provider/opentelemetry_provider_draft.py deleted file mode 100644 index 0882bb3ea45..00000000000 --- a/aws_lambda_powertools/metrics/provider/opentelemetry_provider_draft.py +++ /dev/null @@ -1,106 +0,0 @@ -from __future__ import annotations - -import logging -import numbers -import warnings -from typing import Dict, Optional - -from aws_lambda_powertools.metrics.exceptions import MetricValueError -from aws_lambda_powertools.metrics.provider import MetricsBase, MetricsProviderBase - -logger = logging.getLogger(__name__) - -# Check if using datadog layer -try: - from opentelemetry import metrics - from opentelemetry.exporter.otlp.proto.grpc.metric_exporter import ( - OTLPMetricExporter, - ) - from opentelemetry.sdk.metrics import MeterProvider - from opentelemetry.sdk.metrics.export import ( - PeriodicExportingMetricReader, - ) - from opentelemetry.sdk.resources import SERVICE_NAME, Resource - - otlp_metrics = True -except ImportError: - otlp_metrics = False - - -class OTLPProvider(MetricsProviderBase): - """ - Class for OTLP provider. 
- - """ - - def __init__(self, namespace: str = "default", endpoint: str = "localhost:4317"): - if not otlp_metrics: - raise Exception("OTLP package not found") - resource = Resource(attributes={SERVICE_NAME: namespace}) - - reader = PeriodicExportingMetricReader(OTLPMetricExporter(endpoint=endpoint)) - provider = MeterProvider(resource=resource, metric_readers=[reader]) - # Sets the global default meter provider - metrics.set_meter_provider(provider) - self.meter = metrics.get_meter(namespace) - self.counters: Dict = {} - super().__init__() - - # adding name,value,timestamp,tags - # consider directly calling lambda_metric function here - def add_metric(self, name: str, value: float, unit: Optional[str] = "1", tags: Optional[Dict] = None): - if not isinstance(value, numbers.Real): - raise MetricValueError(f"{value} is not a valid number") - if name not in self.counters: - self.counters[name] = self.meter.create_counter(name=name, unit=unit) - self.counters[name].add(amount=value, attributes=tags) - - # serialize for flushing (Do we really need this function for datadog?) - def serialize(self): - # not implemented - pass - - # flush serialized data to output - def flush(self): - # not implemented - pass - - -class OTLPMetrics(MetricsBase): - """Class for datadog metrics standalone class. 
- - Example - ------- - dd_provider = OTLPProvider(namespace="default") - metrics = OTLPMetrics(provider=dd_provider) - - @metrics.log_metrics(capture_cold_start_metric: bool = True, raise_on_empty_metrics: bool = False) - def lambda_handler(event, context) - metrics.add_metric(name="item_sold",value=1,tags) - """ - - # `log_metrics` and `_add_cold_start_metric` are directly inherited from `MetricsBase` - def __init__(self, provider): - self.provider = provider - super().__init__() - - # drop additional kwargs to keep same experience - def add_metric( - self, - name: str, - value: float, - unit: Optional[str] = "1", - tags: Optional[Dict] = None, - *args, - **kwargs, - ): - self.provider.add_metric(name=name, value=value, unit=unit, tags=tags) - - def flush_metrics(self, raise_on_empty_metrics: bool = False) -> None: - if not self.provider.counters and raise_on_empty_metrics: - warnings.warn( - "No application metrics to publish. The cold-start metric may be published if enabled. " - "If application metrics should never be empty, consider using 'raise_on_empty_metrics'", - stacklevel=2, - ) - # not implemented for OTPL From 5c49b1ea6f84d2920fd46d5356cd4c905b193731 Mon Sep 17 00:00:00 2001 From: Roger Zhang Date: Thu, 13 Jul 2023 16:15:48 -0700 Subject: [PATCH 17/32] add tests for datadog provider --- .../metrics/provider/datadog_provider.py | 10 +-- pyproject.toml | 3 + tests/functional/test_metrics.py | 64 ++++++++++++++++++- 3 files changed, 71 insertions(+), 6 deletions(-) diff --git a/aws_lambda_powertools/metrics/provider/datadog_provider.py b/aws_lambda_powertools/metrics/provider/datadog_provider.py index a8300451d1e..8840dff804b 100644 --- a/aws_lambda_powertools/metrics/provider/datadog_provider.py +++ b/aws_lambda_powertools/metrics/provider/datadog_provider.py @@ -142,7 +142,7 @@ def flush(self, metrics: List): for metric_item in metrics: print(json.dumps(metric_item, separators=(",", ":"))) - def clear(self): + def clear_metrics(self): self.metrics = [] 
@@ -152,7 +152,7 @@ class DataDogMetrics(MetricsBase): Parameters ---------- - provider: MetricsProviderBase + provider: DataDogProvider The datadog provider which will be used to process metrics data Example @@ -165,12 +165,12 @@ class DataDogMetrics(MetricsBase): metrics = DataDogMetrics(provider=dd_provider) @metrics.log_metrics(capture_cold_start_metric=True, raise_on_empty_metrics=False) - def lambda_handler(event, context) + def lambda_handler(event, context): metrics.add_metric(name="item_sold",value=1,tags=['product:latte', 'order:online']) """ # `log_metrics` and `_add_cold_start_metric` are directly inherited from `MetricsBase` - def __init__(self, provider: MetricsProviderBase): + def __init__(self, provider: DataDogProvider): self.provider = provider super().__init__() @@ -229,4 +229,4 @@ def flush_metrics(self, raise_on_empty_metrics: bool = False) -> None: else: # will raise on empty metrics self.provider.flush(metrics) - self.provider.clear() + self.provider.clear_metrics() diff --git a/pyproject.toml b/pyproject.toml index 15145e03fde..110c4087253 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -32,6 +32,7 @@ aws-xray-sdk = { version = "^2.8.0", optional = true } fastjsonschema = { version = "^2.14.5", optional = true } pydantic = { version = "^1.8.2", optional = true } boto3 = { version = "^1.20.32", optional = true } +datadog_lambda = { version = "^4.75.0", optional = true } typing-extensions = "^4.6.2" opentelemetry-sdk = "^1.18.0" opentelemetry-exporter-otlp-proto-grpc = "^1.18.0" @@ -41,6 +42,7 @@ coverage = {extras = ["toml"], version = "^7.2"} pytest = "^7.4.0" black = "^23.3" boto3 = "^1.18" +datadog_lambda = "^4.70.0" isort = "^5.11.5" pytest-cov = "^4.1.0" pytest-mock = "^3.11.1" @@ -83,6 +85,7 @@ aws-requests-auth = "^0.4.3" parser = ["pydantic"] validation = ["fastjsonschema"] tracer = ["aws-xray-sdk"] +datadog = ["datadog_lambda"] all = ["pydantic", "aws-xray-sdk", "fastjsonschema"] # allow customers to run code locally without 
emulators (SAM CLI, etc.) aws-sdk = ["boto3"] diff --git a/tests/functional/test_metrics.py b/tests/functional/test_metrics.py index 4f06b858c26..4c77c6104b6 100644 --- a/tests/functional/test_metrics.py +++ b/tests/functional/test_metrics.py @@ -1,4 +1,5 @@ import json +import os import warnings from collections import namedtuple from typing import Any, Dict, List, Union @@ -20,7 +21,13 @@ MetricManager, reset_cold_start_flag, ) -from aws_lambda_powertools.metrics.provider import AmazonCloudWatchEMF, MetricsBase, MetricsProviderBase +from aws_lambda_powertools.metrics.provider import ( + AmazonCloudWatchEMF, + DataDogMetrics, + DataDogProvider, + MetricsBase, + MetricsProviderBase, +) @pytest.fixture(scope="function", autouse=True) @@ -1348,3 +1355,58 @@ def lambda_handler(evt, context): # and specifically about the lack of Metrics with pytest.raises(SchemaValidationError, match="Must contain at least one metric."): lambda_handler({}, {}) + + +def test_datadog_coldstart(capsys): + dd_provider = DataDogProvider(namespace="Serverlesspresso", flush_to_log=True) + metrics = DataDogMetrics(provider=dd_provider) + + LambdaContext = namedtuple("LambdaContext", "function_name") + + @metrics.log_metrics(capture_cold_start_metric=True, raise_on_empty_metrics=True) + def lambda_handler(event, context): + metrics.add_metric(name="item_sold", value=1, tags=["product:latte", "order:online"]) + + lambda_handler({}, LambdaContext("example_fn")) + logs = capsys.readouterr().out.strip() + assert "ColdStart" in logs + + +def test_datadog_write_to_log(capsys): + os.environ["DD_FLUSH_TO_LOG"] = "True" + dd_provider = DataDogProvider(namespace="Serverlesspresso") + metrics = DataDogMetrics(provider=dd_provider) + metrics.add_metric(name="item_sold", value=1, tags=["product:latte", "order:online"]) + metrics.flush_metrics() + logs = capture_metrics_output(capsys) + logs["e"] = "" + assert logs == 
json.loads('{"m":"Serverlesspresso.item_sold","v":1,"e":"","t":["product:latte","order:online"]}') + + +def test_datadog_namespace(capsys): + dd_provider = DataDogProvider(namespace="Serverlesspresso", flush_to_log=True) + metrics = DataDogMetrics(provider=dd_provider) + + LambdaContext = namedtuple("LambdaContext", "function_name") + + @metrics.log_metrics(capture_cold_start_metric=True, raise_on_empty_metrics=True) + def lambda_handler(event, context): + metrics.add_metric(name="item_sold", value=1, tags=["product:latte", "order:online"]) + + lambda_handler({}, LambdaContext("example_fn")) + logs = capsys.readouterr().out.strip() + assert "Serverlesspresso" in logs + + +def test_datadog_raise_on_empty(): + dd_provider = DataDogProvider(namespace="Serverlesspresso", flush_to_log=True) + metrics = DataDogMetrics(provider=dd_provider) + + LambdaContext = namedtuple("LambdaContext", "function_name") + + @metrics.log_metrics(capture_cold_start_metric=False, raise_on_empty_metrics=True) + def lambda_handler(event, context): + pass + + with pytest.raises(SchemaValidationError, match="Must contain at least one metric."): + lambda_handler({}, LambdaContext("example_fn")) From 1540be7c4c257c1b7c3febc39f078fb16e817968 Mon Sep 17 00:00:00 2001 From: Roger Zhang Date: Thu, 13 Jul 2023 16:24:39 -0700 Subject: [PATCH 18/32] add tests for datadog provider --- pyproject.toml | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 110c4087253..8d5f5a89f40 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -32,10 +32,8 @@ aws-xray-sdk = { version = "^2.8.0", optional = true } fastjsonschema = { version = "^2.14.5", optional = true } pydantic = { version = "^1.8.2", optional = true } boto3 = { version = "^1.20.32", optional = true } -datadog_lambda = { version = "^4.75.0", optional = true } +datadog_lambda = { version = "^4.70.0", optional = true } typing-extensions = "^4.6.2" -opentelemetry-sdk = "^1.18.0" 
-opentelemetry-exporter-otlp-proto-grpc = "^1.18.0" [tool.poetry.dev-dependencies] coverage = {extras = ["toml"], version = "^7.2"} From 01972e54907f1db7adb7a073365cf62516388316 Mon Sep 17 00:00:00 2001 From: Roger Zhang Date: Mon, 24 Jul 2023 16:38:06 -0700 Subject: [PATCH 19/32] migrate from ABC to protocol, support convert kwargs to tags, add test --- .../metrics/provider/base.py | 24 +++---- .../metrics/provider/datadog_provider.py | 70 +++++++++++++------ tests/functional/test_metrics.py | 29 +++++++- 3 files changed, 87 insertions(+), 36 deletions(-) diff --git a/aws_lambda_powertools/metrics/provider/base.py b/aws_lambda_powertools/metrics/provider/base.py index b505b51e309..b253c9deb80 100644 --- a/aws_lambda_powertools/metrics/provider/base.py +++ b/aws_lambda_powertools/metrics/provider/base.py @@ -2,15 +2,14 @@ import functools import logging -from abc import ABC, abstractmethod -from typing import Any, Callable, Dict, Optional +from typing import Any, Callable, Dict, Optional, Protocol logger = logging.getLogger(__name__) is_cold_start = True -class MetricsProviderBase(ABC): +class MetricsProviderBase(Protocol): """ Class for metric provider template. @@ -23,8 +22,7 @@ class MetricsProviderBase(ABC): 3. Customize the behavior and functionality of the metric provider in your subclass. """ - @abstractmethod - def add_metric(self, *args, **kwargs): + def add_metric(self, *args: Any, **kwargs: Any) -> Any: """ Abstract method for adding a metric. @@ -49,8 +47,7 @@ def add_metric(self, *args, **kwargs): """ raise NotImplementedError - @abstractmethod - def serialize(self, *args, **kwargs): + def serialize(self, *args: Any, **kwargs: Any) -> Any: """ Abstract method for serialize a metric. 
@@ -76,8 +73,7 @@ def serialize(self, *args, **kwargs): raise NotImplementedError # flush serialized data to output, or send to API directly - @abstractmethod - def flush(self, *args, **kwargs): + def flush(self, *args: Any, **kwargs): """ Abstract method for flushing a metric. @@ -98,7 +94,7 @@ def flush(self, *args, **kwargs): raise NotImplementedError -class MetricsBase(ABC): +class MetricsBase(Protocol): """ Class for metric template. @@ -108,7 +104,6 @@ class MetricsBase(ABC): NOTE: need to improve this docstring """ - @abstractmethod def add_metric(self, *args, **kwargs): """ Abstract method for adding a metric. @@ -134,7 +129,6 @@ def add_metric(self, *args, **kwargs): """ raise NotImplementedError - @abstractmethod def flush_metrics(self, raise_on_empty_metrics: bool = False) -> None: """Manually flushes the metrics. This is normally not necessary, unless you're running on other runtimes besides Lambda, where the @log_metrics @@ -227,3 +221,9 @@ def _add_cold_start_metric(self, context: Any) -> None: self.add_metric(name="ColdStart", value=1, tag=[{"function_name": context.function_name}]) is_cold_start = False + + +def reset_cold_start_flag_provider(): + global is_cold_start + if not is_cold_start: + is_cold_start = True diff --git a/aws_lambda_powertools/metrics/provider/datadog_provider.py b/aws_lambda_powertools/metrics/provider/datadog_provider.py index 8840dff804b..5c83d8cafe8 100644 --- a/aws_lambda_powertools/metrics/provider/datadog_provider.py +++ b/aws_lambda_powertools/metrics/provider/datadog_provider.py @@ -6,7 +6,7 @@ import os import time import warnings -from typing import List, Optional +from typing import Any, List, Optional from aws_lambda_powertools.metrics.exceptions import MetricValueError, SchemaValidationError from aws_lambda_powertools.metrics.provider import MetricsBase, MetricsProviderBase @@ -57,7 +57,15 @@ def __init__(self, namespace: str = DEFAULT_NAMESPACE, flush_to_log: bool = Fals super().__init__() # adding 
name,value,timestamp,tags - def add_metric(self, name: str, value: float, timestamp: Optional[int] = None, tags: Optional[List] = None): + def add_metric( + self, + name: str, + value: float, + timestamp: Optional[int] = None, + tags: Optional[List] = None, + *args: Any, + **kwargs: Any, + ) -> None: """ The add_metrics function that will be used by metrics class. @@ -71,19 +79,30 @@ def add_metric(self, name: str, value: float, timestamp: Optional[int] = None, t Timestamp in int for the metrics, default = time.time() tags: List[str] In format like List["tag:value","tag2:value2"] + args: Any + extra args will be dropped for compatibility + kwargs: Any + extra kwargs will be converted into tags, e.g., add_metrics(sales=sam) -> tags=['sales:sam'] Examples -------- - add_metric( - name='coffee_house.order_value', - value=12.45, - tags=['product:latte', 'order:online'] - ) + >>> provider = DataDogProvider() + >>> + >>> provider.add_metric( + >>> name='coffee_house.order_value', + >>> value=12.45, + >>> tags=['product:latte', 'order:online'], + >>> sales='sam' + >>> ) """ if not isinstance(value, numbers.Real): raise MetricValueError(f"{value} is not a valid number") + if tags is None: + tags = [] if not timestamp: timestamp = int(time.time()) + for k, w in kwargs.items(): + tags.append(f"{k}:{w}") self.metrics.append({"m": name, "v": value, "e": timestamp, "t": tags}) def serialize(self) -> List: @@ -159,14 +178,14 @@ class DataDogMetrics(MetricsBase): ------- **Creates a few metrics and publish at the end of a function execution** - from aws_lambda_powertools.metrics.provider import DataDogMetrics, DataDogProvider - - dd_provider = DataDogProvider(namespace="Serverlesspresso") - metrics = DataDogMetrics(provider=dd_provider) - - @metrics.log_metrics(capture_cold_start_metric=True, raise_on_empty_metrics=False) - def lambda_handler(event, context): - metrics.add_metric(name="item_sold",value=1,tags=['product:latte', 'order:online']) + >>> from 
aws_lambda_powertools.metrics.provider import DataDogMetrics, DataDogProvider + >>> + >>> dd_provider = DataDogProvider(namespace="Serverlesspresso") + >>> metrics = DataDogMetrics(provider=dd_provider) + >>> + >>> @metrics.log_metrics(capture_cold_start_metric=True, raise_on_empty_metrics=False) + >>> def lambda_handler(event, context): + >>> metrics.add_metric(name="item_sold",value=1,tags=['product:latte', 'order:online']) """ # `log_metrics` and `_add_cold_start_metric` are directly inherited from `MetricsBase` @@ -196,17 +215,24 @@ def add_metric( timestamp: int Timestamp in int for the metrics, default = time.time() tags: List[str] - In format like List["tag:value","tag2:value2"] + In format like List["tag:value","tag2:value2"], + args: Any + extra args will be passed into provider and be dropped + kwargs: Any + extra kwargs will be converted into tags, e.g., add_metrics(sales=sam) -> tags=['sales:sam'] Examples -------- - add_metric( - name='coffee_house.order_value', - value=12.45, - tags=['product:latte', 'order:online'] - ) + >>> from aws_lambda_powertools.metrics.provider import DataDogMetrics, DataDogProvider + >>> + >>> metrics = DataDogMetrics(provider=DataDogProvider()) + >>> metrics.add_metric( + >>> name='coffee_house.order_value', + >>> value=12.45, + >>> tags=['product:latte', 'order:online'] + >>> ) """ - self.provider.add_metric(name=name, value=value, timestamp=timestamp, tags=tags) + self.provider.add_metric(*args, name=name, value=value, timestamp=timestamp, tags=tags, **kwargs) def flush_metrics(self, raise_on_empty_metrics: bool = False) -> None: """ diff --git a/tests/functional/test_metrics.py b/tests/functional/test_metrics.py index 4c77c6104b6..5e5bf2776b4 100644 --- a/tests/functional/test_metrics.py +++ b/tests/functional/test_metrics.py @@ -28,6 +28,7 @@ MetricsBase, MetricsProviderBase, ) +from aws_lambda_powertools.metrics.provider.base import reset_cold_start_flag_provider @pytest.fixture(scope="function", autouse=True) @@ 
-1223,7 +1224,7 @@ def lambda_handler(evt, ctx): @pytest.fixture def metrics_provider() -> MetricsProviderBase: - class MetricsProvider(MetricsProviderBase): + class MetricsProvider: def __init__(self): self.metric_store: List = [] self.result: str @@ -1323,6 +1324,7 @@ def lambda_handler(evt, context): def test_metrics_provider_class_no_coldstart(capsys, metrics_provider, metrics_class): + reset_cold_start_flag_provider() my_metrics = metrics_class(provider=metrics_provider()) # WHEN log_metrics is used with capture_cold_start_metric @@ -1358,6 +1360,7 @@ def lambda_handler(evt, context): def test_datadog_coldstart(capsys): + reset_cold_start_flag_provider() dd_provider = DataDogProvider(namespace="Serverlesspresso", flush_to_log=True) metrics = DataDogMetrics(provider=dd_provider) @@ -1367,7 +1370,7 @@ def test_datadog_coldstart(capsys): def lambda_handler(event, context): metrics.add_metric(name="item_sold", value=1, tags=["product:latte", "order:online"]) - lambda_handler({}, LambdaContext("example_fn")) + lambda_handler({}, LambdaContext("example_fn2")) logs = capsys.readouterr().out.strip() assert "ColdStart" in logs @@ -1410,3 +1413,25 @@ def lambda_handler(event, context): with pytest.raises(SchemaValidationError, match="Must contain at least one metric."): lambda_handler({}, LambdaContext("example_fn")) + + +def test_datadog_kwargs(capsys): + dd_provider = DataDogProvider(namespace="Serverlesspresso", flush_to_log=True) + metrics = DataDogMetrics(provider=dd_provider) + metrics.add_metric( + name="order_valve", + value=12.45, + tags=["test:kwargs"], + str="str", + int=123, + float=45.6, + dict={"type": "termination identified"}, + ) + metrics.flush_metrics() + logs = capsys.readouterr().out.strip() + log_dict = json.loads(logs) + tag_list = log_dict.get("t") + assert "test:kwargs" in tag_list + assert "str:str" in tag_list + assert "int:123" in tag_list + assert "float:45.6" in tag_list From c31d41f70dfca8c39c915e6f35b06223bb82b871 Mon Sep 17 00:00:00 2001 
From: Roger Zhang Date: Mon, 24 Jul 2023 16:53:35 -0700 Subject: [PATCH 20/32] migrate from ABC to protocol, support convert kwargs to tags, add test --- .../metrics/provider/datadog_provider.py | 5 ++--- tests/functional/test_metrics.py | 11 +++++++++++ 2 files changed, 13 insertions(+), 3 deletions(-) diff --git a/aws_lambda_powertools/metrics/provider/datadog_provider.py b/aws_lambda_powertools/metrics/provider/datadog_provider.py index 5c83d8cafe8..5575c9b5092 100644 --- a/aws_lambda_powertools/metrics/provider/datadog_provider.py +++ b/aws_lambda_powertools/metrics/provider/datadog_provider.py @@ -63,7 +63,6 @@ def add_metric( value: float, timestamp: Optional[int] = None, tags: Optional[List] = None, - *args: Any, **kwargs: Any, ) -> None: """ @@ -217,7 +216,7 @@ def add_metric( tags: List[str] In format like List["tag:value","tag2:value2"], args: Any - extra args will be passed into provider and be dropped + extra args will be dropped kwargs: Any extra kwargs will be converted into tags, e.g., add_metrics(sales=sam) -> tags=['sales:sam'] @@ -232,7 +231,7 @@ def add_metric( >>> tags=['product:latte', 'order:online'] >>> ) """ - self.provider.add_metric(*args, name=name, value=value, timestamp=timestamp, tags=tags, **kwargs) + self.provider.add_metric(name=name, value=value, timestamp=timestamp, tags=tags, **kwargs) def flush_metrics(self, raise_on_empty_metrics: bool = False) -> None: """ diff --git a/tests/functional/test_metrics.py b/tests/functional/test_metrics.py index 5e5bf2776b4..3eed61a1271 100644 --- a/tests/functional/test_metrics.py +++ b/tests/functional/test_metrics.py @@ -1415,6 +1415,17 @@ def lambda_handler(event, context): lambda_handler({}, LambdaContext("example_fn")) +def test_datadog_args(capsys): + dd_provider = DataDogProvider(namespace="Serverlesspresso", flush_to_log=True) + metrics = DataDogMetrics(provider=dd_provider) + metrics.add_metric("order_valve", 12.45, sales="sam") + metrics.flush_metrics() + logs = 
capsys.readouterr().out.strip() + log_dict = json.loads(logs) + tag_list = log_dict.get("t") + assert "sales:sam" in tag_list + + def test_datadog_kwargs(capsys): dd_provider = DataDogProvider(namespace="Serverlesspresso", flush_to_log=True) metrics = DataDogMetrics(provider=dd_provider) From 3bebcad0dc1c701a283c1085427b73e6e5836858 Mon Sep 17 00:00:00 2001 From: Roger Zhang Date: Mon, 24 Jul 2023 16:55:57 -0700 Subject: [PATCH 21/32] migrate from ABC to protocol, support convert kwargs to tags, add test --- aws_lambda_powertools/metrics/provider/base.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aws_lambda_powertools/metrics/provider/base.py b/aws_lambda_powertools/metrics/provider/base.py index b253c9deb80..34b38edae07 100644 --- a/aws_lambda_powertools/metrics/provider/base.py +++ b/aws_lambda_powertools/metrics/provider/base.py @@ -73,7 +73,7 @@ def serialize(self, *args: Any, **kwargs: Any) -> Any: raise NotImplementedError # flush serialized data to output, or send to API directly - def flush(self, *args: Any, **kwargs): + def flush(self, *args: Any, **kwargs) -> Any: """ Abstract method for flushing a metric. 
From e7c1443aecc95b293bf5fe5290629382bfa57d13 Mon Sep 17 00:00:00 2001 From: Roger Zhang Date: Tue, 25 Jul 2023 11:03:52 -0700 Subject: [PATCH 22/32] remove parent class, fix example --- aws_lambda_powertools/metrics/provider/datadog_provider.py | 4 ++-- examples/metrics/src/use_providers.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/aws_lambda_powertools/metrics/provider/datadog_provider.py b/aws_lambda_powertools/metrics/provider/datadog_provider.py index 5575c9b5092..3f67da286df 100644 --- a/aws_lambda_powertools/metrics/provider/datadog_provider.py +++ b/aws_lambda_powertools/metrics/provider/datadog_provider.py @@ -9,7 +9,7 @@ from typing import Any, List, Optional from aws_lambda_powertools.metrics.exceptions import MetricValueError, SchemaValidationError -from aws_lambda_powertools.metrics.provider import MetricsBase, MetricsProviderBase +from aws_lambda_powertools.metrics.provider import MetricsBase logger = logging.getLogger(__name__) @@ -22,7 +22,7 @@ DEFAULT_NAMESPACE = "default" -class DataDogProvider(MetricsProviderBase): +class DataDogProvider: """ Class for datadog provider. 
This Class should only be used inside DataDogMetrics all datadog metric data will be stored as diff --git a/examples/metrics/src/use_providers.py b/examples/metrics/src/use_providers.py index e7260d084c7..82b11db83f4 100644 --- a/examples/metrics/src/use_providers.py +++ b/examples/metrics/src/use_providers.py @@ -1,4 +1,4 @@ -from aws_lambda_powertools.metrics.provider.datadog_provider_draft import ( +from aws_lambda_powertools.metrics.provider.datadog_provider import ( DataDogMetrics, DataDogProvider, ) @@ -9,4 +9,4 @@ @metrics.log_metrics(capture_cold_start_metric=True, raise_on_empty_metrics=False) def lambda_handler(event, context): - metrics.add_metric(name="item_sold", value=1, tags=["category:online"]) + metrics.add_metric(name="item_sold", value=1, tags=["category:online"], product="latte") From a9047647561010546ad053009afead72a001a333 Mon Sep 17 00:00:00 2001 From: Leandro Damascena Date: Thu, 27 Jul 2023 11:37:55 +0100 Subject: [PATCH 23/32] base: fix small problems --- .../metrics/provider/base.py | 15 +++- .../metrics/provider/datadog_provider.py | 71 ++++++++++++------- tests/functional/test_metrics.py | 5 +- 3 files changed, 62 insertions(+), 29 deletions(-) diff --git a/aws_lambda_powertools/metrics/provider/base.py b/aws_lambda_powertools/metrics/provider/base.py index 34b38edae07..83e7833a2be 100644 --- a/aws_lambda_powertools/metrics/provider/base.py +++ b/aws_lambda_powertools/metrics/provider/base.py @@ -141,6 +141,19 @@ def flush_metrics(self, raise_on_empty_metrics: bool = False) -> None: """ raise NotImplementedError + def add_cold_start_metric(self, metric_name: str, function_name: str) -> None: + """ + Add a cold start metric for a specific function. + + Parameters + ---------- + metric_name: str + The name of the cold start metric to add. + function_name: str + The name of the function associated with the cold start metric. 
+ """ + raise NotImplementedError + def log_metrics( self, lambda_handler: Callable[[Dict, Any], Any] | Optional[Callable[[Dict, Any, Optional[Dict]], Any]] = None, @@ -218,7 +231,7 @@ def _add_cold_start_metric(self, context: Any) -> None: return logger.debug("Adding cold start metric and function_name dimension") - self.add_metric(name="ColdStart", value=1, tag=[{"function_name": context.function_name}]) + self.add_cold_start_metric(metric_name="ColdStart", function_name=context.function_name) is_cold_start = False diff --git a/aws_lambda_powertools/metrics/provider/datadog_provider.py b/aws_lambda_powertools/metrics/provider/datadog_provider.py index 3f67da286df..4875fef5b12 100644 --- a/aws_lambda_powertools/metrics/provider/datadog_provider.py +++ b/aws_lambda_powertools/metrics/provider/datadog_provider.py @@ -49,20 +49,19 @@ def __init__(self, namespace: str = DEFAULT_NAMESPACE, flush_to_log: bool = Fals (namespace.metrics_name) flush_to_log: bool Flush datadog metrics to log (collect with log forwarder) rather than using datadog extension + See: https://docs.datadoghq.com/logs/guide/forwarder/?tab=cloudformation """ self.metrics: List = [] self.namespace: str = namespace - # either is true then flush to log self.flush_to_log = (os.environ.get("DD_FLUSH_TO_LOG", "").lower() == "true") or flush_to_log super().__init__() - # adding name,value,timestamp,tags def add_metric( self, name: str, value: float, - timestamp: Optional[int] = None, - tags: Optional[List] = None, + timestamp: int | None = None, + tags: List | None = None, **kwargs: Any, ) -> None: """ @@ -71,40 +70,54 @@ def add_metric( Parameters ---------- name: str - Name/Key for the metrics + Name/Key for the metric value: float - Value for the metrics + Value for the metric timestamp: int - Timestamp in int for the metrics, default = time.time() + Timestamp in int for the metrics, default = None tags: List[str] In format like List["tag:value","tag2:value2"] - args: Any - extra args will be dropped 
for compatibility kwargs: Any - extra kwargs will be converted into tags, e.g., add_metrics(sales=sam) -> tags=['sales:sam'] + extra kwargs will be converted into tags, e.g., add_metrics(sales="sam") -> tags=['sales:sam'] Examples -------- + >>> from aws_lambda_powertools.utilities.typing import LambdaContext + >>> from aws_lambda_powertools.metrics.provider import DataDogMetrics + >>> from aws_lambda_powertools.metrics.provider import DataDogProvider + >>> provider = DataDogProvider() - >>> - >>> provider.add_metric( - >>> name='coffee_house.order_value', - >>> value=12.45, - >>> tags=['product:latte', 'order:online'], - >>> sales='sam' - >>> ) + >>> metrics = DataDogMetrics(provider=provider) + + >>> @metrics.log_metrics(capture_cold_start_metric=True, raise_on_empty_metrics=False) + >>> def lambda_handler(event: dict, context: LambdaContext): + >>> metrics.add_metric(name="SuccessfulBooking", value=1, product="airline") """ + + if not tags: + tags = [] + if not isinstance(value, numbers.Real): raise MetricValueError(f"{value} is not a valid number") - if tags is None: - tags = [] - if not timestamp: - timestamp = int(time.time()) - for k, w in kwargs.items(): - tags.append(f"{k}:{w}") + + for tag_key, tag_value in kwargs.items(): + tags.append(f"{tag_key}:{tag_value}") + self.metrics.append({"m": name, "v": value, "e": timestamp, "t": tags}) def serialize(self) -> List: + """ + Serialize the metrics in the current instance of metrics. + + Returns + ------- + A list of dict, where each dictionary represents a serialized metric with the following keys: + - 'm' (str): The metric name. If the namespace is not None, the name will be in the format + 'namespace.metric_name', otherwise, it will be just 'metric_name'. + - 'v' (float): The value of the metric. + - 'e' (float): The timestamp associated with the metric. + - 't' (str): The tags of the metric. 
+ """ output_list: List = [] for single_metric in self.metrics: @@ -112,6 +125,7 @@ def serialize(self) -> List: metric_name = f"{self.namespace}.{single_metric['m']}" else: metric_name = single_metric["m"] + output_list.append( { "m": metric_name, @@ -123,7 +137,6 @@ def serialize(self) -> List: return output_list - # flush serialized data to output def flush(self, metrics: List): """ @@ -144,6 +157,7 @@ def flush(self, metrics: List): """ if len(metrics) == 0: raise SchemaValidationError("Must contain at least one metric.") + # submit through datadog extension if lambda_metric and self.flush_to_log is False: # use lambda_metric function from datadog package, submit metrics to datadog @@ -160,7 +174,10 @@ def flush(self, metrics: List): for metric_item in metrics: print(json.dumps(metric_item, separators=(",", ":"))) + self.clear_metrics() + def clear_metrics(self): + logger.debug("Clearing out existing metric set from memory") self.metrics = [] @@ -184,7 +201,7 @@ class DataDogMetrics(MetricsBase): >>> >>> @metrics.log_metrics(capture_cold_start_metric=True, raise_on_empty_metrics=False) >>> def lambda_handler(event, context): - >>> metrics.add_metric(name="item_sold",value=1,tags=['product:latte', 'order:online']) + >>> metrics.add_metric(name="item_sold",value=1, product="latte", order="online") """ # `log_metrics` and `_add_cold_start_metric` are directly inherited from `MetricsBase` @@ -254,4 +271,6 @@ def flush_metrics(self, raise_on_empty_metrics: bool = False) -> None: else: # will raise on empty metrics self.provider.flush(metrics) - self.provider.clear_metrics() + + def add_cold_start_metric(self, metric_name: str, function_name: str) -> None: + self.provider.add_metric(name=metric_name, value=1, timestamp=int(time.time()), function_name=function_name) diff --git a/tests/functional/test_metrics.py b/tests/functional/test_metrics.py index 3eed61a1271..342f0cc1e50 100644 --- a/tests/functional/test_metrics.py +++ b/tests/functional/test_metrics.py @@ 
-1264,6 +1264,9 @@ def flush_metrics(self, raise_on_empty_metrics: bool = False) -> None: self.provider.flush() self.provider.clear() + def add_cold_start_metric(self, metric_name: str, function_name: str) -> None: + self.provider.add_metric(name=metric_name, value=1, function_name=function_name) + return MetricsClass @@ -1319,8 +1322,6 @@ def lambda_handler(evt, context): # THEN ColdStart metric and function_name and service dimension should be logged assert output[0]["name"] == "ColdStart" - assert output[0]["value"] == 1 - assert output[0]["tag"] == [{"function_name": "example_fn"}] def test_metrics_provider_class_no_coldstart(capsys, metrics_provider, metrics_class): From 58a25fbe75b289d67e95c70024113c7293c91b77 Mon Sep 17 00:00:00 2001 From: Leandro Damascena Date: Thu, 27 Jul 2023 14:35:24 +0100 Subject: [PATCH 24/32] refactoring: removing Datadog provider --- .../metrics/provider/__init__.py | 6 - .../metrics/provider/datadog_provider.py | 276 ------------- examples/metrics/src/use_providers.py | 12 - poetry.lock | 380 ++++-------------- pyproject.toml | 3 - tests/functional/test_metrics.py | 92 ----- 6 files changed, 75 insertions(+), 694 deletions(-) delete mode 100644 aws_lambda_powertools/metrics/provider/datadog_provider.py delete mode 100644 examples/metrics/src/use_providers.py diff --git a/aws_lambda_powertools/metrics/provider/__init__.py b/aws_lambda_powertools/metrics/provider/__init__.py index a06007a7918..cecdf2de588 100644 --- a/aws_lambda_powertools/metrics/provider/__init__.py +++ b/aws_lambda_powertools/metrics/provider/__init__.py @@ -5,16 +5,10 @@ Metrics, ) from aws_lambda_powertools.metrics.provider.base import MetricsBase, MetricsProviderBase -from aws_lambda_powertools.metrics.provider.datadog_provider import ( - DataDogMetrics, - DataDogProvider, -) __all__ = [ "MetricsBase", "MetricsProviderBase", - "DataDogMetrics", - "DataDogProvider", "Metrics", "AmazonCloudWatchEMF", "EphemeralAmazonCloudWatchEMF", diff --git 
a/aws_lambda_powertools/metrics/provider/datadog_provider.py b/aws_lambda_powertools/metrics/provider/datadog_provider.py deleted file mode 100644 index 4875fef5b12..00000000000 --- a/aws_lambda_powertools/metrics/provider/datadog_provider.py +++ /dev/null @@ -1,276 +0,0 @@ -from __future__ import annotations - -import json -import logging -import numbers -import os -import time -import warnings -from typing import Any, List, Optional - -from aws_lambda_powertools.metrics.exceptions import MetricValueError, SchemaValidationError -from aws_lambda_powertools.metrics.provider import MetricsBase - -logger = logging.getLogger(__name__) - -# Check if using datadog layer -try: - from datadog_lambda.metric import lambda_metric # type: ignore -except ImportError: - lambda_metric = None - -DEFAULT_NAMESPACE = "default" - - -class DataDogProvider: - """ - Class for datadog provider. This Class should only be used inside DataDogMetrics - all datadog metric data will be stored as - { - "m": metric_name, - "v": value, - "e": timestamp - "t": List["tag:value","tag2:value2"] - } - see https://github.com/DataDog/datadog-lambda-python/blob/main/datadog_lambda/metric.py#L77 - - Examples - -------- - - """ - - def __init__(self, namespace: str = DEFAULT_NAMESPACE, flush_to_log: bool = False): - """ - - Parameters - ---------- - namespace: str - For datadog, namespace will be appended in front of the metrics name in metrics exported. 
- (namespace.metrics_name) - flush_to_log: bool - Flush datadog metrics to log (collect with log forwarder) rather than using datadog extension - See: https://docs.datadoghq.com/logs/guide/forwarder/?tab=cloudformation - """ - self.metrics: List = [] - self.namespace: str = namespace - self.flush_to_log = (os.environ.get("DD_FLUSH_TO_LOG", "").lower() == "true") or flush_to_log - super().__init__() - - def add_metric( - self, - name: str, - value: float, - timestamp: int | None = None, - tags: List | None = None, - **kwargs: Any, - ) -> None: - """ - The add_metrics function that will be used by metrics class. - - Parameters - ---------- - name: str - Name/Key for the metric - value: float - Value for the metric - timestamp: int - Timestamp in int for the metrics, default = None - tags: List[str] - In format like List["tag:value","tag2:value2"] - kwargs: Any - extra kwargs will be converted into tags, e.g., add_metrics(sales="sam") -> tags=['sales:sam'] - - Examples - -------- - >>> from aws_lambda_powertools.utilities.typing import LambdaContext - >>> from aws_lambda_powertools.metrics.provider import DataDogMetrics - >>> from aws_lambda_powertools.metrics.provider import DataDogProvider - - >>> provider = DataDogProvider() - >>> metrics = DataDogMetrics(provider=provider) - - >>> @metrics.log_metrics(capture_cold_start_metric=True, raise_on_empty_metrics=False) - >>> def lambda_handler(event: dict, context: LambdaContext): - >>> metrics.add_metric(name="SuccessfulBooking", value=1, product="airline") - """ - - if not tags: - tags = [] - - if not isinstance(value, numbers.Real): - raise MetricValueError(f"{value} is not a valid number") - - for tag_key, tag_value in kwargs.items(): - tags.append(f"{tag_key}:{tag_value}") - - self.metrics.append({"m": name, "v": value, "e": timestamp, "t": tags}) - - def serialize(self) -> List: - """ - Serialize the metrics in the current instance of metrics. 
- - Returns - ------- - A list of dict, where each dictionary represents a serialized metric with the following keys: - - 'm' (str): The metric name. If the namespace is not None, the name will be in the format - 'namespace.metric_name', otherwise, it will be just 'metric_name'. - - 'v' (float): The value of the metric. - - 'e' (float): The timestamp associated with the metric. - - 't' (str): The tags of the metric. - """ - output_list: List = [] - - for single_metric in self.metrics: - if self.namespace != DEFAULT_NAMESPACE: - metric_name = f"{self.namespace}.{single_metric['m']}" - else: - metric_name = single_metric["m"] - - output_list.append( - { - "m": metric_name, - "v": single_metric["v"], - "e": single_metric["e"], - "t": single_metric["t"], - }, - ) - - return output_list - - def flush(self, metrics: List): - """ - - Parameters - ---------- - metrics: List[Dict] - [{ - "m": metric_name, - "v": value, - "e": timestamp - "t": List["tag:value","tag2:value2"] - }] - - Raises - ------- - SchemaValidationError - When metric object fails EMF schema validation - """ - if len(metrics) == 0: - raise SchemaValidationError("Must contain at least one metric.") - - # submit through datadog extension - if lambda_metric and self.flush_to_log is False: - # use lambda_metric function from datadog package, submit metrics to datadog - for metric_item in metrics: - lambda_metric( - metric_name=metric_item["m"], - value=metric_item["v"], - timestamp=metric_item["e"], - tags=metric_item["t"], - ) - else: - # dd module not found: flush to log, this format can be recognized via datadog log forwarder - # https://github.com/DataDog/datadog-lambda-python/blob/main/datadog_lambda/metric.py#L77 - for metric_item in metrics: - print(json.dumps(metric_item, separators=(",", ":"))) - - self.clear_metrics() - - def clear_metrics(self): - logger.debug("Clearing out existing metric set from memory") - self.metrics = [] - - -class DataDogMetrics(MetricsBase): - """ - Class for datadog 
metrics - - Parameters - ---------- - provider: DataDogProvider - The datadog provider which will be used to process metrics data - - Example - ------- - **Creates a few metrics and publish at the end of a function execution** - - >>> from aws_lambda_powertools.metrics.provider import DataDogMetrics, DataDogProvider - >>> - >>> dd_provider = DataDogProvider(namespace="Serverlesspresso") - >>> metrics = DataDogMetrics(provider=dd_provider) - >>> - >>> @metrics.log_metrics(capture_cold_start_metric=True, raise_on_empty_metrics=False) - >>> def lambda_handler(event, context): - >>> metrics.add_metric(name="item_sold",value=1, product="latte", order="online") - """ - - # `log_metrics` and `_add_cold_start_metric` are directly inherited from `MetricsBase` - def __init__(self, provider: DataDogProvider): - self.provider = provider - super().__init__() - - # drop additional kwargs to keep same experience - def add_metric( - self, - name: str, - value: float, - timestamp: Optional[int] = None, - tags: Optional[List] = None, - *args, - **kwargs, - ): - """ - The add_metrics function that will be used by metrics class. 
- - Parameters - ---------- - name: str - Name/Key for the metrics - value: float - Value for the metrics - timestamp: int - Timestamp in int for the metrics, default = time.time() - tags: List[str] - In format like List["tag:value","tag2:value2"], - args: Any - extra args will be dropped - kwargs: Any - extra kwargs will be converted into tags, e.g., add_metrics(sales=sam) -> tags=['sales:sam'] - - Examples - -------- - >>> from aws_lambda_powertools.metrics.provider import DataDogMetrics, DataDogProvider - >>> - >>> metrics = DataDogMetrics(provider=DataDogProvider()) - >>> metrics.add_metric( - >>> name='coffee_house.order_value', - >>> value=12.45, - >>> tags=['product:latte', 'order:online'] - >>> ) - """ - self.provider.add_metric(name=name, value=value, timestamp=timestamp, tags=tags, **kwargs) - - def flush_metrics(self, raise_on_empty_metrics: bool = False) -> None: - """ - Manually flushes the metrics. This is normally not necessary, - unless you're running on other runtimes besides Lambda, where the @log_metrics - decorator already handles things for you. - - Parameters - ---------- - raise_on_empty_metrics: bool - raise exception if no metrics are emitted, by default False - """ - metrics = self.provider.serialize() - if not metrics and not raise_on_empty_metrics: - warnings.warn( - "No application metrics to publish. The cold-start metric may be published if enabled. 
" - "If application metrics should never be empty, consider using 'raise_on_empty_metrics'", - stacklevel=2, - ) - else: - # will raise on empty metrics - self.provider.flush(metrics) - - def add_cold_start_metric(self, metric_name: str, function_name: str) -> None: - self.provider.add_metric(name=metric_name, value=1, timestamp=int(time.time()), function_name=function_name) diff --git a/examples/metrics/src/use_providers.py b/examples/metrics/src/use_providers.py deleted file mode 100644 index 82b11db83f4..00000000000 --- a/examples/metrics/src/use_providers.py +++ /dev/null @@ -1,12 +0,0 @@ -from aws_lambda_powertools.metrics.provider.datadog_provider import ( - DataDogMetrics, - DataDogProvider, -) - -dd_provider = DataDogProvider(namespace="default") -metrics = DataDogMetrics(provider=dd_provider) - - -@metrics.log_metrics(capture_cold_start_metric=True, raise_on_empty_metrics=False) -def lambda_handler(event, context): - metrics.add_metric(name="item_sold", value=1, tags=["category:online"], product="latte") diff --git a/poetry.lock b/poetry.lock index d6e6c3967cf..95b86b402b6 100644 --- a/poetry.lock +++ b/poetry.lock @@ -216,17 +216,6 @@ files = [ botocore = ">=1.11.3" wrapt = "*" -[[package]] -name = "backoff" -version = "2.2.1" -description = "Function decoration for backoff and retry" -optional = false -python-versions = ">=3.7,<4.0" -files = [ - {file = "backoff-2.2.1-py3-none-any.whl", hash = "sha256:63579f9a0628e06278f7e47b7d7d5b6ce20dc65c5e96a6f3ca99a6adca0396e8"}, - {file = "backoff-2.2.1.tar.gz", hash = "sha256:03f829f5bb1923180821643f8753b0502c3b682293992485b0eef2807afa5cba"}, -] - [[package]] name = "bandit" version = "1.7.5" @@ -302,17 +291,17 @@ uvloop = ["uvloop (>=0.15.2)"] [[package]] name = "boto3" -version = "1.28.9" +version = "1.28.12" description = "The AWS SDK for Python" optional = false python-versions = ">= 3.7" files = [ - {file = "boto3-1.28.9-py3-none-any.whl", hash = 
"sha256:01f078047eb4d238c6b9c6cc623f2af33b4ae67980c5326691e35cb5493ff6c7"}, - {file = "boto3-1.28.9.tar.gz", hash = "sha256:4cc0c6005be910e52077227e670930ab55a41ba86cdb6d1c052571d08cd4d32c"}, + {file = "boto3-1.28.12-py3-none-any.whl", hash = "sha256:cfcb20d5784428f31d89889e68b26efeda90f231c3119eef4af8b25ad405c55f"}, + {file = "boto3-1.28.12.tar.gz", hash = "sha256:d5ac6599951fdd519ed26c6fe15c41a7aa4021cb9adce33167344f8ce5cdb07b"}, ] [package.dependencies] -botocore = ">=1.31.9,<1.32.0" +botocore = ">=1.31.12,<1.32.0" jmespath = ">=0.7.1,<2.0.0" s3transfer = ">=0.6.0,<0.7.0" @@ -321,13 +310,13 @@ crt = ["botocore[crt] (>=1.21.0,<2.0a0)"] [[package]] name = "botocore" -version = "1.31.9" +version = "1.31.12" description = "Low-level, data-driven core of boto 3." optional = false python-versions = ">= 3.7" files = [ - {file = "botocore-1.31.9-py3-none-any.whl", hash = "sha256:e56ccd3536a90094ea5b176b5dd33bfe4f049efdf71af468ea1661bd424c787d"}, - {file = "botocore-1.31.9.tar.gz", hash = "sha256:bd849d3ac95f1781385ed831d753a04a3ec870a59d6598175aaedd71dc2baf5f"}, + {file = "botocore-1.31.12-py3-none-any.whl", hash = "sha256:86380672151866b5e425636e3ebad74f2b83e7163e36ef5d38d11a04b9cba33b"}, + {file = "botocore-1.31.12.tar.gz", hash = "sha256:7e5db466c762a071bb58c9a39d070f1333ce4f4ba6fdf9820ba21e87bd4c7e29"}, ] [package.dependencies] @@ -620,23 +609,6 @@ files = [ {file = "decorator-5.1.1.tar.gz", hash = "sha256:637996211036b6385ef91435e4fae22989472f9d571faba8927ba8253acbc330"}, ] -[[package]] -name = "deprecated" -version = "1.2.14" -description = "Python @deprecated decorator to deprecate old python classes, functions or methods." 
-optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" -files = [ - {file = "Deprecated-1.2.14-py2.py3-none-any.whl", hash = "sha256:6fac8b097794a90302bdbb17b9b815e732d3c4720583ff1b198499d78470466c"}, - {file = "Deprecated-1.2.14.tar.gz", hash = "sha256:e5323eb936458dccc2582dc6f9c322c852a775a27065ff2b0c4970b9d53d01b3"}, -] - -[package.dependencies] -wrapt = ">=1.10,<2" - -[package.extras] -dev = ["PyTest", "PyTest-Cov", "bump2version (<1)", "sphinx (<2)", "tox"] - [[package]] name = "exceptiongroup" version = "1.1.2" @@ -750,80 +722,6 @@ files = [ gitdb = ">=4.0.1,<5" typing-extensions = {version = ">=3.7.4.3", markers = "python_version < \"3.8\""} -[[package]] -name = "googleapis-common-protos" -version = "1.59.1" -description = "Common protobufs used in Google APIs" -optional = false -python-versions = ">=3.7" -files = [ - {file = "googleapis-common-protos-1.59.1.tar.gz", hash = "sha256:b35d530fe825fb4227857bc47ad84c33c809ac96f312e13182bdeaa2abe1178a"}, - {file = "googleapis_common_protos-1.59.1-py2.py3-none-any.whl", hash = "sha256:0cbedb6fb68f1c07e18eb4c48256320777707e7d0c55063ae56c15db3224a61e"}, -] - -[package.dependencies] -protobuf = ">=3.19.5,<3.20.0 || >3.20.0,<3.20.1 || >3.20.1,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<5.0.0.dev0" - -[package.extras] -grpc = ["grpcio (>=1.44.0,<2.0.0.dev0)"] - -[[package]] -name = "grpcio" -version = "1.56.0" -description = "HTTP/2-based RPC framework" -optional = false -python-versions = ">=3.7" -files = [ - {file = "grpcio-1.56.0-cp310-cp310-linux_armv7l.whl", hash = "sha256:fb34ace11419f1ae321c36ccaa18d81cd3f20728cd191250be42949d6845bb2d"}, - {file = "grpcio-1.56.0-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:008767c0aed4899e657b50f2e0beacbabccab51359eba547f860e7c55f2be6ba"}, - {file = "grpcio-1.56.0-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:17f47aeb9be0da5337f9ff33ebb8795899021e6c0741ee68bd69774a7804ca86"}, - 
{file = "grpcio-1.56.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:43c50d810cc26349b093bf2cfe86756ab3e9aba3e7e681d360930c1268e1399a"}, - {file = "grpcio-1.56.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:187b8f71bad7d41eea15e0c9812aaa2b87adfb343895fffb704fb040ca731863"}, - {file = "grpcio-1.56.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:881575f240eb5db72ddca4dc5602898c29bc082e0d94599bf20588fb7d1ee6a0"}, - {file = "grpcio-1.56.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:c243b158dd7585021d16c50498c4b2ec0a64a6119967440c5ff2d8c89e72330e"}, - {file = "grpcio-1.56.0-cp310-cp310-win32.whl", hash = "sha256:8b3b2c7b5feef90bc9a5fa1c7f97637e55ec3e76460c6d16c3013952ee479cd9"}, - {file = "grpcio-1.56.0-cp310-cp310-win_amd64.whl", hash = "sha256:03a80451530fd3b8b155e0c4480434f6be669daf7ecba56f73ef98f94222ee01"}, - {file = "grpcio-1.56.0-cp311-cp311-linux_armv7l.whl", hash = "sha256:64bd3abcf9fb4a9fa4ede8d0d34686314a7075f62a1502217b227991d9ca4245"}, - {file = "grpcio-1.56.0-cp311-cp311-macosx_10_10_universal2.whl", hash = "sha256:fdc3a895791af4addbb826808d4c9c35917c59bb5c430d729f44224e51c92d61"}, - {file = "grpcio-1.56.0-cp311-cp311-manylinux_2_17_aarch64.whl", hash = "sha256:4f84a6fd4482e5fe73b297d4874b62a535bc75dc6aec8e9fe0dc88106cd40397"}, - {file = "grpcio-1.56.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:14e70b4dda3183abea94c72d41d5930c333b21f8561c1904a372d80370592ef3"}, - {file = "grpcio-1.56.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6b5ce42a5ebe3e04796246ba50357f1813c44a6efe17a37f8dc7a5c470377312"}, - {file = "grpcio-1.56.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:8219f17baf069fe8e42bd8ca0b312b875595e43a70cabf397be4fda488e2f27d"}, - {file = "grpcio-1.56.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:defdd14b518e6e468466f799aaa69db0355bca8d3a5ea75fb912d28ba6f8af31"}, - {file = 
"grpcio-1.56.0-cp311-cp311-win32.whl", hash = "sha256:50f4daa698835accbbcc60e61e0bc29636c0156ddcafb3891c987e533a0031ba"}, - {file = "grpcio-1.56.0-cp311-cp311-win_amd64.whl", hash = "sha256:59c4e606993a47146fbeaf304b9e78c447f5b9ee5641cae013028c4cca784617"}, - {file = "grpcio-1.56.0-cp37-cp37m-linux_armv7l.whl", hash = "sha256:b1f4b6f25a87d80b28dd6d02e87d63fe1577fe6d04a60a17454e3f8077a38279"}, - {file = "grpcio-1.56.0-cp37-cp37m-macosx_10_10_universal2.whl", hash = "sha256:c2148170e01d464d41011a878088444c13413264418b557f0bdcd1bf1b674a0e"}, - {file = "grpcio-1.56.0-cp37-cp37m-manylinux_2_17_aarch64.whl", hash = "sha256:0409de787ebbf08c9d2bca2bcc7762c1efe72eada164af78b50567a8dfc7253c"}, - {file = "grpcio-1.56.0-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:66f0369d27f4c105cd21059d635860bb2ea81bd593061c45fb64875103f40e4a"}, - {file = "grpcio-1.56.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:38fdf5bd0a1c754ce6bf9311a3c2c7ebe56e88b8763593316b69e0e9a56af1de"}, - {file = "grpcio-1.56.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:79d4c5911d12a7aa671e5eb40cbb50a830396525014d2d6f254ea2ba180ce637"}, - {file = "grpcio-1.56.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:5d2fc471668a7222e213f86ef76933b18cdda6a51ea1322034478df8c6519959"}, - {file = "grpcio-1.56.0-cp37-cp37m-win_amd64.whl", hash = "sha256:991224fd485e088d3cb5e34366053691a4848a6b7112b8f5625a411305c26691"}, - {file = "grpcio-1.56.0-cp38-cp38-linux_armv7l.whl", hash = "sha256:c6f36621aabecbaff3e70c4d1d924c76c8e6a7ffec60c331893640a4af0a8037"}, - {file = "grpcio-1.56.0-cp38-cp38-macosx_10_10_universal2.whl", hash = "sha256:1eadd6de258901929223f422ffed7f8b310c0323324caf59227f9899ea1b1674"}, - {file = "grpcio-1.56.0-cp38-cp38-manylinux_2_17_aarch64.whl", hash = "sha256:72836b5a1d4f508ffbcfe35033d027859cc737972f9dddbe33fb75d687421e2e"}, - {file = "grpcio-1.56.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:f92a99ab0c7772fb6859bf2e4f44ad30088d18f7c67b83205297bfb229e0d2cf"}, - {file = "grpcio-1.56.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa08affbf672d051cd3da62303901aeb7042a2c188c03b2c2a2d346fc5e81c14"}, - {file = "grpcio-1.56.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:e2db108b4c8e29c145e95b0226973a66d73ae3e3e7fae00329294af4e27f1c42"}, - {file = "grpcio-1.56.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8674fdbd28266d8efbcddacf4ec3643f76fe6376f73283fd63a8374c14b0ef7c"}, - {file = "grpcio-1.56.0-cp38-cp38-win32.whl", hash = "sha256:bd55f743e654fb050c665968d7ec2c33f03578a4bbb163cfce38024775ff54cc"}, - {file = "grpcio-1.56.0-cp38-cp38-win_amd64.whl", hash = "sha256:c63bc5ac6c7e646c296fed9139097ae0f0e63f36f0864d7ce431cce61fe0118a"}, - {file = "grpcio-1.56.0-cp39-cp39-linux_armv7l.whl", hash = "sha256:c0bc9dda550785d23f4f025be614b7faa8d0293e10811f0f8536cf50435b7a30"}, - {file = "grpcio-1.56.0-cp39-cp39-macosx_10_10_universal2.whl", hash = "sha256:d596408bab632ec7b947761e83ce6b3e7632e26b76d64c239ba66b554b7ee286"}, - {file = "grpcio-1.56.0-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:76b6e6e1ee9bda32e6e933efd61c512e9a9f377d7c580977f090d1a9c78cca44"}, - {file = "grpcio-1.56.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7beb84ebd0a3f732625124b73969d12b7350c5d9d64ddf81ae739bbc63d5b1ed"}, - {file = "grpcio-1.56.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:83ec714bbbe9b9502177c842417fde39f7a267031e01fa3cd83f1ca49688f537"}, - {file = "grpcio-1.56.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:4feee75565d1b5ab09cb3a5da672b84ca7f6dd80ee07a50f5537207a9af543a4"}, - {file = "grpcio-1.56.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:b4638a796778329cc8e142e4f57c705adb286b3ba64e00b0fa91eeb919611be8"}, - {file = "grpcio-1.56.0-cp39-cp39-win32.whl", hash = "sha256:437af5a7673bca89c4bc0a993382200592d104dd7bf55eddcd141cef91f40bab"}, - {file = 
"grpcio-1.56.0-cp39-cp39-win_amd64.whl", hash = "sha256:4241a1c2c76e748023c834995cd916570e7180ee478969c2d79a60ce007bc837"}, - {file = "grpcio-1.56.0.tar.gz", hash = "sha256:4c08ee21b3d10315b8dc26f6c13917b20ed574cdbed2d2d80c53d5508fdcc0f2"}, -] - -[package.extras] -protobuf = ["grpcio-tools (>=1.56.0)"] - [[package]] name = "h11" version = "0.14.0" @@ -997,13 +895,13 @@ files = [ [[package]] name = "importlib-metadata" -version = "6.0.1" +version = "6.7.0" description = "Read metadata from Python packages" optional = false python-versions = ">=3.7" files = [ - {file = "importlib_metadata-6.0.1-py3-none-any.whl", hash = "sha256:1543daade821c89b1c4a55986c326f36e54f2e6ca3bad96be4563d0acb74dcd4"}, - {file = "importlib_metadata-6.0.1.tar.gz", hash = "sha256:950127d57e35a806d520817d3e92eec3f19fdae9f0cd99da77a407c5aabefba3"}, + {file = "importlib_metadata-6.7.0-py3-none-any.whl", hash = "sha256:cb52082e659e97afc5dac71e79de97d8681de3aa07ff18578330904a9d18e5b5"}, + {file = "importlib_metadata-6.7.0.tar.gz", hash = "sha256:1aaf550d4f73e5d6783e7acb77aec43d49da8017410afae93822cc9cca98c4d4"}, ] [package.dependencies] @@ -1013,7 +911,7 @@ zipp = ">=0.5" [package.extras] docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] perf = ["ipython"] -testing = ["flake8 (<5)", "flufl.flake8", "importlib-resources (>=1.3)", "packaging", "pyfakefs", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)", "pytest-perf (>=0.9.2)"] +testing = ["flufl.flake8", "importlib-resources (>=1.3)", "packaging", "pyfakefs", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-mypy (>=0.9.1)", "pytest-perf (>=0.9.2)", "pytest-ruff"] [[package]] name = "importlib-resources" @@ -1133,6 +1031,7 @@ optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, 
!=3.4.*, !=3.5.*, !=3.6.*" files = [ {file = "jsonpatch-1.33-py2.py3-none-any.whl", hash = "sha256:0ae28c0cd062bbd8b8ecc26d7d164fbbea9652a1a3693f3b956c1eae5145dade"}, + {file = "jsonpatch-1.33.tar.gz", hash = "sha256:9fcd4009c41e6d12348b4a0ff2563ba56a2923a7dfee731d004e212e1ee5030c"}, ] [package.dependencies] @@ -1165,6 +1064,7 @@ optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*, !=3.6.*" files = [ {file = "jsonpointer-2.4-py2.py3-none-any.whl", hash = "sha256:15d51bba20eea3165644553647711d150376234112651b4f1811022aecad7d7a"}, + {file = "jsonpointer-2.4.tar.gz", hash = "sha256:585cee82b70211fa9e6043b7bb89db6e1aa49524340dde8ad6b63206ea689d88"}, ] [[package]] @@ -1243,19 +1143,20 @@ restructuredtext = ["rst2ansi"] [[package]] name = "markdown" -version = "3.3.7" -description = "Python implementation of Markdown." +version = "3.4.4" +description = "Python implementation of John Gruber's Markdown." optional = false -python-versions = ">=3.6" +python-versions = ">=3.7" files = [ - {file = "Markdown-3.3.7-py3-none-any.whl", hash = "sha256:f5da449a6e1c989a4cea2631aa8ee67caa5a2ef855d551c88f9e309f4634c621"}, - {file = "Markdown-3.3.7.tar.gz", hash = "sha256:cbb516f16218e643d8e0a95b309f77eb118cb138d39a4f27851e6a63581db874"}, + {file = "Markdown-3.4.4-py3-none-any.whl", hash = "sha256:a4c1b65c0957b4bd9e7d86ddc7b3c9868fb9670660f6f99f6d1bca8954d5a941"}, + {file = "Markdown-3.4.4.tar.gz", hash = "sha256:225c6123522495d4119a90b3a3ba31a1e87a70369e03f14799ea9c0d7183a3d6"}, ] [package.dependencies] importlib-metadata = {version = ">=4.4", markers = "python_version < \"3.10\""} [package.extras] +docs = ["mdx-gh-links (>=0.2)", "mkdocs (>=1.0)", "mkdocs-nature (>=0.4)"] testing = ["coverage", "pyyaml"] [[package]] @@ -1387,13 +1288,13 @@ test = ["coverage", "flake8 (>=3.0)", "shtab"] [[package]] name = "mkdocs" -version = "1.4.3" +version = "1.5.0" description = "Project documentation with Markdown." 
optional = false python-versions = ">=3.7" files = [ - {file = "mkdocs-1.4.3-py3-none-any.whl", hash = "sha256:6ee46d309bda331aac915cd24aab882c179a933bd9e77b80ce7d2eaaa3f689dd"}, - {file = "mkdocs-1.4.3.tar.gz", hash = "sha256:5955093bbd4dd2e9403c5afaf57324ad8b04f16886512a3ee6ef828956481c57"}, + {file = "mkdocs-1.5.0-py3-none-any.whl", hash = "sha256:91a75e3a5a75e006b2149814d5c56af170039ceda0732f51e7af1a463599c00d"}, + {file = "mkdocs-1.5.0.tar.gz", hash = "sha256:ff54eac0b74bf39a2e91f179e2ac16ef36f0294b9ab161c22f564382b30a31ae"}, ] [package.dependencies] @@ -1402,9 +1303,12 @@ colorama = {version = ">=0.4", markers = "platform_system == \"Windows\""} ghp-import = ">=1.0" importlib-metadata = {version = ">=4.3", markers = "python_version < \"3.10\""} jinja2 = ">=2.11.1" -markdown = ">=3.2.1,<3.4" +markdown = ">=3.2.1" +markupsafe = ">=2.0.1" mergedeep = ">=1.3.4" packaging = ">=20.5" +pathspec = ">=0.11.1" +platformdirs = ">=2.2.0" pyyaml = ">=5.1" pyyaml-env-tag = ">=0.1" typing-extensions = {version = ">=3.10", markers = "python_version < \"3.8\""} @@ -1412,7 +1316,7 @@ watchdog = ">=2.0" [package.extras] i18n = ["babel (>=2.9.0)"] -min-versions = ["babel (==2.9.0)", "click (==7.0)", "colorama (==0.4)", "ghp-import (==1.0)", "importlib-metadata (==4.3)", "jinja2 (==2.11.1)", "markdown (==3.2.1)", "markupsafe (==2.0.1)", "mergedeep (==1.3.4)", "packaging (==20.5)", "pyyaml (==5.1)", "pyyaml-env-tag (==0.1)", "typing-extensions (==3.10)", "watchdog (==2.0)"] +min-versions = ["babel (==2.9.0)", "click (==7.0)", "colorama (==0.4)", "ghp-import (==1.0)", "importlib-metadata (==4.3)", "jinja2 (==2.11.1)", "markdown (==3.2.1)", "markupsafe (==2.0.1)", "mergedeep (==1.3.4)", "packaging (==20.5)", "pathspec (==0.11.1)", "platformdirs (==2.2.0)", "pyyaml (==5.1)", "pyyaml-env-tag (==0.1)", "typing-extensions (==3.10)", "watchdog (==2.0)"] [[package]] name = "mkdocs-git-revision-date-plugin" @@ -1431,13 +1335,13 @@ mkdocs = ">=0.17" [[package]] name = "mkdocs-material" 
-version = "9.1.19" +version = "9.1.20" description = "Documentation that simply works" optional = false python-versions = ">=3.7" files = [ - {file = "mkdocs_material-9.1.19-py3-none-any.whl", hash = "sha256:fb0a149294b319aedf36983919d8c40c9e566db21ead16258e20ebd2e6c0961c"}, - {file = "mkdocs_material-9.1.19.tar.gz", hash = "sha256:73b94b08c765e92a80645aac58d6a741fc5f587deec2b715489c714827b15a6f"}, + {file = "mkdocs_material-9.1.20-py3-none-any.whl", hash = "sha256:152db66f667825d5aa3398386fe4d227640ec393c31e7cf109b114a569fc40fc"}, + {file = "mkdocs_material-9.1.20.tar.gz", hash = "sha256:91621b6a6002138c72d50a0beef20ed12cf367d2af27d1f53382562b3a9625c7"}, ] [package.dependencies] @@ -1528,13 +1432,13 @@ reports = ["lxml"] [[package]] name = "mypy-boto3-appconfig" -version = "1.28.0" -description = "Type annotations for boto3.AppConfig 1.28.0 service generated with mypy-boto3-builder 7.14.5" +version = "1.28.12" +description = "Type annotations for boto3.AppConfig 1.28.12 service generated with mypy-boto3-builder 7.15.2" optional = false python-versions = ">=3.7" files = [ - {file = "mypy-boto3-appconfig-1.28.0.tar.gz", hash = "sha256:753044339ce1da00e0b60f387ed957013712ab69ca51a9b56859a4ae502c806a"}, - {file = "mypy_boto3_appconfig-1.28.0-py3-none-any.whl", hash = "sha256:5708545675610ceb686339e90f4d6f8276f9e2ad2f15db2833ebc593185708f5"}, + {file = "mypy-boto3-appconfig-1.28.12.tar.gz", hash = "sha256:02080219c25b7d7837257d070109ba1fe55cd281eb1f8115ca94556d25453e29"}, + {file = "mypy_boto3_appconfig-1.28.12-py3-none-any.whl", hash = "sha256:4796bdae21a86de03198c181e514875287b2cbac46a7718d25128cb61c457f0a"}, ] [package.dependencies] @@ -1542,13 +1446,13 @@ typing-extensions = {version = ">=4.1.0", markers = "python_version < \"3.9\""} [[package]] name = "mypy-boto3-appconfigdata" -version = "1.28.0" -description = "Type annotations for boto3.AppConfigData 1.28.0 service generated with mypy-boto3-builder 7.14.5" +version = "1.28.12" +description = "Type annotations 
for boto3.AppConfigData 1.28.12 service generated with mypy-boto3-builder 7.15.2" optional = false python-versions = ">=3.7" files = [ - {file = "mypy-boto3-appconfigdata-1.28.0.tar.gz", hash = "sha256:1f0331fcb6642c44d335d08250ee5abcd840030304d480adf85ea017ba941bb4"}, - {file = "mypy_boto3_appconfigdata-1.28.0-py3-none-any.whl", hash = "sha256:c01ecbbda949497dbb650cc761915a7e14b621db5bc20cc1fd3f49620e74c9bc"}, + {file = "mypy-boto3-appconfigdata-1.28.12.tar.gz", hash = "sha256:b03bf529d366dd4e3deb2bb096049abfdb903381fb3382f3995be8fac6d48680"}, + {file = "mypy_boto3_appconfigdata-1.28.12-py3-none-any.whl", hash = "sha256:20c30fef5769479e0ae9ba1726aa15621875523a2a8e3d729db4964f110c1d04"}, ] [package.dependencies] @@ -1556,13 +1460,13 @@ typing-extensions = {version = ">=4.1.0", markers = "python_version < \"3.9\""} [[package]] name = "mypy-boto3-cloudformation" -version = "1.28.10" -description = "Type annotations for boto3.CloudFormation 1.28.10 service generated with mypy-boto3-builder 7.15.1" +version = "1.28.12" +description = "Type annotations for boto3.CloudFormation 1.28.12 service generated with mypy-boto3-builder 7.15.2" optional = false python-versions = ">=3.7" files = [ - {file = "mypy-boto3-cloudformation-1.28.10.tar.gz", hash = "sha256:81632665c3c9a648a665af390c555e5a8ad6bf0d4a48e4729aa1ead11b643aef"}, - {file = "mypy_boto3_cloudformation-1.28.10-py3-none-any.whl", hash = "sha256:c2623baf32c3c47976a4454d5812367ee27cee7f3ab0fbe98818ac9020db54c0"}, + {file = "mypy-boto3-cloudformation-1.28.12.tar.gz", hash = "sha256:2a276c52a4907d5f0b111a5fd0d880b20d188905cd8b672cde5ac46363a8b3fa"}, + {file = "mypy_boto3_cloudformation-1.28.12-py3-none-any.whl", hash = "sha256:7b16b8a3000f9dff13ead9edcebd34ee19e2130d213ecf05b371e02048f1a7a3"}, ] [package.dependencies] @@ -1570,13 +1474,13 @@ typing-extensions = {version = ">=4.1.0", markers = "python_version < \"3.9\""} [[package]] name = "mypy-boto3-cloudwatch" -version = "1.28.0" -description = "Type annotations for 
boto3.CloudWatch 1.28.0 service generated with mypy-boto3-builder 7.14.5" +version = "1.28.12" +description = "Type annotations for boto3.CloudWatch 1.28.12 service generated with mypy-boto3-builder 7.15.2" optional = false python-versions = ">=3.7" files = [ - {file = "mypy-boto3-cloudwatch-1.28.0.tar.gz", hash = "sha256:c34cc45c8a57702e11cf38de590af447d90cd3ea68328ea2908452d8a09d471a"}, - {file = "mypy_boto3_cloudwatch-1.28.0-py3-none-any.whl", hash = "sha256:8812c6120111798f84b2e1fe5808aae1f5766c183746ea336dec14f9bdf3308b"}, + {file = "mypy-boto3-cloudwatch-1.28.12.tar.gz", hash = "sha256:c3205f75845a6b9aff313f2e74e1f5e4b7462772946da9c27cf0fa09194f83b9"}, + {file = "mypy_boto3_cloudwatch-1.28.12-py3-none-any.whl", hash = "sha256:d81da96bc035265a53a72ff813250ae19af12a404f24adb86fcc1376c42de441"}, ] [package.dependencies] @@ -1584,13 +1488,13 @@ typing-extensions = {version = ">=4.1.0", markers = "python_version < \"3.9\""} [[package]] name = "mypy-boto3-dynamodb" -version = "1.28.11" -description = "Type annotations for boto3.DynamoDB 1.28.11 service generated with mypy-boto3-builder 7.15.1" +version = "1.28.12" +description = "Type annotations for boto3.DynamoDB 1.28.12 service generated with mypy-boto3-builder 7.15.2" optional = false python-versions = ">=3.7" files = [ - {file = "mypy-boto3-dynamodb-1.28.11.tar.gz", hash = "sha256:14b3fc0f091fad2c467733d410b1438b2747ab57292784abc0bc1adc3fa7ec60"}, - {file = "mypy_boto3_dynamodb-1.28.11-py3-none-any.whl", hash = "sha256:2b777cc080228e6c0e0b7a7e53b30d8c954637c7a5ec628f4397891f844ab0e8"}, + {file = "mypy-boto3-dynamodb-1.28.12.tar.gz", hash = "sha256:a201c983a36b336561558d5116bd32dd3e20888cf51ccdb4dae81fc203d34bef"}, + {file = "mypy_boto3_dynamodb-1.28.12-py3-none-any.whl", hash = "sha256:67862ae79d2304298d4730edd264d2b0b4dd2bf246eca9fec1e9bccecd3cca90"}, ] [package.dependencies] @@ -1598,13 +1502,13 @@ typing-extensions = {version = ">=4.1.0", markers = "python_version < \"3.9\""} [[package]] name = 
"mypy-boto3-lambda" -version = "1.28.11" -description = "Type annotations for boto3.Lambda 1.28.11 service generated with mypy-boto3-builder 7.15.1" +version = "1.28.12" +description = "Type annotations for boto3.Lambda 1.28.12 service generated with mypy-boto3-builder 7.15.2" optional = false python-versions = ">=3.7" files = [ - {file = "mypy-boto3-lambda-1.28.11.tar.gz", hash = "sha256:2df51192be0ff1d37b3d34ffc8e5849e352e812c65fd79b1fa85c439367e478f"}, - {file = "mypy_boto3_lambda-1.28.11-py3-none-any.whl", hash = "sha256:8449b569b7cb387d2be3b8db584f60789f08fe61866d11f8b8d64b2431989a18"}, + {file = "mypy-boto3-lambda-1.28.12.tar.gz", hash = "sha256:c0e8e276350d3d9fd2e6ad3711a1f292bbec617251f575b6edc7c11dbf12b2d3"}, + {file = "mypy_boto3_lambda-1.28.12-py3-none-any.whl", hash = "sha256:3e2c31ecb7caa820827bf330f13fda3e98fe8eb94f1710fb673e35570242d2e4"}, ] [package.dependencies] @@ -1612,13 +1516,13 @@ typing-extensions = {version = ">=4.1.0", markers = "python_version < \"3.9\""} [[package]] name = "mypy-boto3-logs" -version = "1.28.1" -description = "Type annotations for boto3.CloudWatchLogs 1.28.1 service generated with mypy-boto3-builder 7.14.5" +version = "1.28.12" +description = "Type annotations for boto3.CloudWatchLogs 1.28.12 service generated with mypy-boto3-builder 7.15.2" optional = false python-versions = ">=3.7" files = [ - {file = "mypy-boto3-logs-1.28.1.tar.gz", hash = "sha256:e0278a977d68c15120c0f2f4a85b46d0ca3e17c676c7d218dbbb3cfa1b4c8ef1"}, - {file = "mypy_boto3_logs-1.28.1-py3-none-any.whl", hash = "sha256:0ab3b7b39f1c3b0d530096fe2bfb9df09b989b0ea718e5ecbf823c32b016e319"}, + {file = "mypy-boto3-logs-1.28.12.tar.gz", hash = "sha256:776b84c0dab03b2b1d16b65792a10e577bf9e36fa44af2a2a20f8adb6f1608a1"}, + {file = "mypy_boto3_logs-1.28.12-py3-none-any.whl", hash = "sha256:d61490f4ced9e2a0673a526cb129ce4f75fa55198a84c0fa64a04da95d1e43b7"}, ] [package.dependencies] @@ -1626,13 +1530,13 @@ typing-extensions = {version = ">=4.1.0", markers = 
"python_version < \"3.9\""} [[package]] name = "mypy-boto3-s3" -version = "1.28.8" -description = "Type annotations for boto3.S3 1.28.8 service generated with mypy-boto3-builder 7.15.1" +version = "1.28.12" +description = "Type annotations for boto3.S3 1.28.12 service generated with mypy-boto3-builder 7.15.2" optional = false python-versions = ">=3.7" files = [ - {file = "mypy-boto3-s3-1.28.8.tar.gz", hash = "sha256:c9ed17fee2c0e2edeb2966b3796af7b349dcc4eeee54dbd59a269fdb9418eb55"}, - {file = "mypy_boto3_s3-1.28.8-py3-none-any.whl", hash = "sha256:75b929c517c5ad8f97c14dfba5f8521db569157dc4ac76a07a178805777cff8c"}, + {file = "mypy-boto3-s3-1.28.12.tar.gz", hash = "sha256:7d54b03e0bd72cc1fe690efdce9eeac9e2855dfbafa15390a0f518cae9e280d2"}, + {file = "mypy_boto3_s3-1.28.12-py3-none-any.whl", hash = "sha256:aceb69305c3e0af831d9a14314ea0afa9c93a9c393af25ea6a08fbc5ba350afd"}, ] [package.dependencies] @@ -1640,13 +1544,13 @@ typing-extensions = {version = ">=4.1.0", markers = "python_version < \"3.9\""} [[package]] name = "mypy-boto3-secretsmanager" -version = "1.28.3.post2" -description = "Type annotations for boto3.SecretsManager 1.28.3 service generated with mypy-boto3-builder 7.15.0" +version = "1.28.12" +description = "Type annotations for boto3.SecretsManager 1.28.12 service generated with mypy-boto3-builder 7.15.2" optional = false python-versions = ">=3.7" files = [ - {file = "mypy-boto3-secretsmanager-1.28.3.post2.tar.gz", hash = "sha256:f359f6446ac856d0887e40cb0f5bc6e0a60873524be5dd4b68be1d0fc4ac513e"}, - {file = "mypy_boto3_secretsmanager-1.28.3.post2-py3-none-any.whl", hash = "sha256:3a5e5619ee945f244d2dfefcb382c85874171a18b46f75403465622095284d25"}, + {file = "mypy-boto3-secretsmanager-1.28.12.tar.gz", hash = "sha256:f4d353549c7078b85a9174559f49cf39ef4ac67a25f2bc830f0e611455cb7ff1"}, + {file = "mypy_boto3_secretsmanager-1.28.12-py3-none-any.whl", hash = "sha256:94ce82843f4c02a7992dde707ca3378ad8ac13dd6eda947050a8638efd4e4f62"}, ] [package.dependencies] @@ 
-1654,13 +1558,13 @@ typing-extensions = {version = ">=4.1.0", markers = "python_version < \"3.9\""} [[package]] name = "mypy-boto3-ssm" -version = "1.28.0" -description = "Type annotations for boto3.SSM 1.28.0 service generated with mypy-boto3-builder 7.14.5" +version = "1.28.12" +description = "Type annotations for boto3.SSM 1.28.12 service generated with mypy-boto3-builder 7.15.2" optional = false python-versions = ">=3.7" files = [ - {file = "mypy-boto3-ssm-1.28.0.tar.gz", hash = "sha256:15482d2bff7995230549d145547f0ea92d01b68716aa25297e2a2da015922309"}, - {file = "mypy_boto3_ssm-1.28.0-py3-none-any.whl", hash = "sha256:e6ac60818c807baeeb0ef6714832c23904f2ed463fc40133059e2f63abd432fa"}, + {file = "mypy-boto3-ssm-1.28.12.tar.gz", hash = "sha256:78b4cef65e18ae6714022d95955a51208a1614ca2dc6757735c9527c96cd7c14"}, + {file = "mypy_boto3_ssm-1.28.12-py3-none-any.whl", hash = "sha256:5576d651129cdef275f2dcf7d20dd6df17e45d1534e5020e7a1aac32227215d8"}, ] [package.dependencies] @@ -1668,13 +1572,13 @@ typing-extensions = {version = ">=4.1.0", markers = "python_version < \"3.9\""} [[package]] name = "mypy-boto3-xray" -version = "1.28.0" -description = "Type annotations for boto3.XRay 1.28.0 service generated with mypy-boto3-builder 7.14.5" +version = "1.28.12" +description = "Type annotations for boto3.XRay 1.28.12 service generated with mypy-boto3-builder 7.15.2" optional = false python-versions = ">=3.7" files = [ - {file = "mypy-boto3-xray-1.28.0.tar.gz", hash = "sha256:8ce07598f7eeabe66e8dc8cb7e906efb96198b9102f58e9315e6daf166abf3e7"}, - {file = "mypy_boto3_xray-1.28.0-py3-none-any.whl", hash = "sha256:64cd601a829c274665b977853f85b27464986e9eec1ebc03f5bc4530a400b2f6"}, + {file = "mypy-boto3-xray-1.28.12.tar.gz", hash = "sha256:88485fac6b070254aa7305efe50fe7b72c8fe6f18db2dd811497b0071d12729f"}, + {file = "mypy_boto3_xray-1.28.12-py3-none-any.whl", hash = "sha256:489ea65077897c50b32b2240bd6660e5199c27feb14d3fa7bbcc1872ef0bd702"}, ] [package.dependencies] @@ -1709,102 
+1613,6 @@ doc = ["nb2plots (>=0.6)", "numpydoc (>=1.1)", "pillow (>=8.2)", "pydata-sphinx- extra = ["lxml (>=4.5)", "pydot (>=1.4.1)", "pygraphviz (>=1.7)"] test = ["codecov (>=2.1)", "pytest (>=6.2)", "pytest-cov (>=2.12)"] -[[package]] -name = "opentelemetry-api" -version = "1.18.0" -description = "OpenTelemetry Python API" -optional = false -python-versions = ">=3.7" -files = [ - {file = "opentelemetry_api-1.18.0-py3-none-any.whl", hash = "sha256:d05bcc94ec239fd76fd90d784c5e3ad081a8a1ac2ffc8a2c83a49ace052d1492"}, - {file = "opentelemetry_api-1.18.0.tar.gz", hash = "sha256:2bbf29739fcef268c419e3bf1735566c2e7f81026c14bcc78b62a0b97f8ecf2f"}, -] - -[package.dependencies] -deprecated = ">=1.2.6" -importlib-metadata = ">=6.0.0,<6.1.0" -setuptools = ">=16.0" - -[[package]] -name = "opentelemetry-exporter-otlp-proto-common" -version = "1.18.0" -description = "OpenTelemetry Protobuf encoding" -optional = false -python-versions = ">=3.7" -files = [ - {file = "opentelemetry_exporter_otlp_proto_common-1.18.0-py3-none-any.whl", hash = "sha256:276073ccc8c6e6570fe05ca8ca0de77d662bc89bc614ec8bfbc855112f7e25e3"}, - {file = "opentelemetry_exporter_otlp_proto_common-1.18.0.tar.gz", hash = "sha256:4d9883d6929aabe75e485950bbe8b149a14d95e50b1570426832daa6913b0871"}, -] - -[package.dependencies] -opentelemetry-proto = "1.18.0" - -[[package]] -name = "opentelemetry-exporter-otlp-proto-grpc" -version = "1.18.0" -description = "OpenTelemetry Collector Protobuf over gRPC Exporter" -optional = false -python-versions = ">=3.7" -files = [ - {file = "opentelemetry_exporter_otlp_proto_grpc-1.18.0-py3-none-any.whl", hash = "sha256:c773bc9df2c9d6464f0d5936963399b2fc440f0616c1277f29512d540ad7e0a2"}, - {file = "opentelemetry_exporter_otlp_proto_grpc-1.18.0.tar.gz", hash = "sha256:8eddfde4267da876871e62f1b58369986bdb7e47e43032c498f1ea807d7191c4"}, -] - -[package.dependencies] -backoff = {version = ">=1.10.0,<3.0.0", markers = "python_version >= \"3.7\""} -deprecated = ">=1.2.6" 
-googleapis-common-protos = ">=1.52,<2.0" -grpcio = ">=1.0.0,<2.0.0" -opentelemetry-api = ">=1.15,<2.0" -opentelemetry-exporter-otlp-proto-common = "1.18.0" -opentelemetry-proto = "1.18.0" -opentelemetry-sdk = ">=1.18.0,<1.19.0" - -[package.extras] -test = ["pytest-grpc"] - -[[package]] -name = "opentelemetry-proto" -version = "1.18.0" -description = "OpenTelemetry Python Proto" -optional = false -python-versions = ">=3.7" -files = [ - {file = "opentelemetry_proto-1.18.0-py3-none-any.whl", hash = "sha256:34d1c49283f0246a58761d9322d5a79702a09afda0bb181bb6378ed26862e446"}, - {file = "opentelemetry_proto-1.18.0.tar.gz", hash = "sha256:4f38d01049c3926b9fd09833574bfb5e172d84c8ca85e2ab7f4b5a198d75aeef"}, -] - -[package.dependencies] -protobuf = ">=3.19,<5.0" - -[[package]] -name = "opentelemetry-sdk" -version = "1.18.0" -description = "OpenTelemetry Python SDK" -optional = false -python-versions = ">=3.7" -files = [ - {file = "opentelemetry_sdk-1.18.0-py3-none-any.whl", hash = "sha256:a097cc1e0db6ff33b4d250a9350dc17975d24a22aa667fca2866e60c51306723"}, - {file = "opentelemetry_sdk-1.18.0.tar.gz", hash = "sha256:cd3230930a2ab288b1df149d261e9cd2bd48dee54ad18465a777831cb6779e90"}, -] - -[package.dependencies] -opentelemetry-api = "1.18.0" -opentelemetry-semantic-conventions = "0.39b0" -setuptools = ">=16.0" -typing-extensions = ">=3.7.4" - -[[package]] -name = "opentelemetry-semantic-conventions" -version = "0.39b0" -description = "OpenTelemetry Semantic Conventions" -optional = false -python-versions = ">=3.7" -files = [ - {file = "opentelemetry_semantic_conventions-0.39b0-py3-none-any.whl", hash = "sha256:0dd7a9dc0dfde2335f643705bba8f7c44182c797bc208b7601f0b8e8211cfd5c"}, - {file = "opentelemetry_semantic_conventions-0.39b0.tar.gz", hash = "sha256:06a9f198574e0dab6ebc072b59d89092cf9f115638a8a02157586769b6b7a69a"}, -] - [[package]] name = "packaging" version = "23.1" @@ -1900,28 +1708,6 @@ importlib-metadata = {version = ">=0.12", markers = "python_version < \"3.8\""} dev = 
["pre-commit", "tox"] testing = ["pytest", "pytest-benchmark"] -[[package]] -name = "protobuf" -version = "4.23.3" -description = "" -optional = false -python-versions = ">=3.7" -files = [ - {file = "protobuf-4.23.3-cp310-abi3-win32.whl", hash = "sha256:514b6bbd54a41ca50c86dd5ad6488afe9505901b3557c5e0f7823a0cf67106fb"}, - {file = "protobuf-4.23.3-cp310-abi3-win_amd64.whl", hash = "sha256:cc14358a8742c4e06b1bfe4be1afbdf5c9f6bd094dff3e14edb78a1513893ff5"}, - {file = "protobuf-4.23.3-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:2991f5e7690dab569f8f81702e6700e7364cc3b5e572725098215d3da5ccc6ac"}, - {file = "protobuf-4.23.3-cp37-abi3-manylinux2014_aarch64.whl", hash = "sha256:08fe19d267608d438aa37019236db02b306e33f6b9902c3163838b8e75970223"}, - {file = "protobuf-4.23.3-cp37-abi3-manylinux2014_x86_64.whl", hash = "sha256:3b01a5274ac920feb75d0b372d901524f7e3ad39c63b1a2d55043f3887afe0c1"}, - {file = "protobuf-4.23.3-cp37-cp37m-win32.whl", hash = "sha256:aca6e86a08c5c5962f55eac9b5bd6fce6ed98645d77e8bfc2b952ecd4a8e4f6a"}, - {file = "protobuf-4.23.3-cp37-cp37m-win_amd64.whl", hash = "sha256:0149053336a466e3e0b040e54d0b615fc71de86da66791c592cc3c8d18150bf8"}, - {file = "protobuf-4.23.3-cp38-cp38-win32.whl", hash = "sha256:84ea0bd90c2fdd70ddd9f3d3fc0197cc24ecec1345856c2b5ba70e4d99815359"}, - {file = "protobuf-4.23.3-cp38-cp38-win_amd64.whl", hash = "sha256:3bcbeb2bf4bb61fe960dd6e005801a23a43578200ea8ceb726d1f6bd0e562ba1"}, - {file = "protobuf-4.23.3-cp39-cp39-win32.whl", hash = "sha256:5cb9e41188737f321f4fce9a4337bf40a5414b8d03227e1d9fbc59bc3a216e35"}, - {file = "protobuf-4.23.3-cp39-cp39-win_amd64.whl", hash = "sha256:29660574cd769f2324a57fb78127cda59327eb6664381ecfe1c69731b83e8288"}, - {file = "protobuf-4.23.3-py3-none-any.whl", hash = "sha256:447b9786ac8e50ae72cae7a2eec5c5df6a9dbf9aa6f908f1b8bda6032644ea62"}, - {file = "protobuf-4.23.3.tar.gz", hash = "sha256:7a92beb30600332a52cdadbedb40d33fd7c8a0d7f549c440347bc606fb3fe34b"}, -] - [[package]] name = "publication" 
version = "0.0.3" @@ -2531,22 +2317,6 @@ starlette = ["starlette (>=0.19.1)"] starlite = ["starlite (>=1.48)"] tornado = ["tornado (>=5)"] -[[package]] -name = "setuptools" -version = "68.0.0" -description = "Easily download, build, install, upgrade, and uninstall Python packages" -optional = false -python-versions = ">=3.7" -files = [ - {file = "setuptools-68.0.0-py3-none-any.whl", hash = "sha256:11e52c67415a381d10d6b462ced9cfb97066179f0e871399e006c4ab101fc85f"}, - {file = "setuptools-68.0.0.tar.gz", hash = "sha256:baf1fdb41c6da4cd2eae722e135500da913332ab3f2f5c7d33af9b492acb5235"}, -] - -[package.extras] -docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-hoverxref (<2)", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (==0.8.3)", "sphinx-reredirects", "sphinxcontrib-towncrier"] -testing = ["build[virtualenv]", "filelock (>=3.4.0)", "flake8-2020", "ini2toml[lite] (>=0.9)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "pip (>=19.1)", "pip-run (>=8.8)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-mypy (>=0.9.1)", "pytest-perf", "pytest-ruff", "pytest-timeout", "pytest-xdist", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"] -testing-integration = ["build[virtualenv]", "filelock (>=3.4.0)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "pytest", "pytest-enabler", "pytest-xdist", "tomli", "virtualenv (>=13.0.0)", "wheel"] - [[package]] name = "six" version = "1.16.0" @@ -2805,7 +2575,7 @@ watchmedo = ["PyYAML (>=3.10)"] name = "wrapt" version = "1.15.0" description = "Module for decorators, wrappers and monkey patching." 
-optional = false +optional = true python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7" files = [ {file = "wrapt-1.15.0-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:ca1cccf838cd28d5a0883b342474c630ac48cac5df0ee6eacc9c7290f76b11c1"}, diff --git a/pyproject.toml b/pyproject.toml index 77ed14226e4..5b085f6bb91 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -33,7 +33,6 @@ aws-xray-sdk = { version = "^2.8.0", optional = true } fastjsonschema = { version = "^2.14.5", optional = true } pydantic = { version = "^1.8.2", optional = true } boto3 = { version = "^1.20.32", optional = true } -datadog_lambda = { version = "^4.70.0", optional = true } typing-extensions = "^4.6.2" [tool.poetry.dev-dependencies] @@ -41,7 +40,6 @@ coverage = {extras = ["toml"], version = "^7.2"} pytest = "^7.4.0" black = "^23.3" boto3 = "^1.18" -datadog_lambda = "^4.70.0" isort = "^5.11.5" pytest-cov = "^4.1.0" pytest-mock = "^3.11.1" @@ -83,7 +81,6 @@ aws-requests-auth = "^0.4.3" parser = ["pydantic"] validation = ["fastjsonschema"] tracer = ["aws-xray-sdk"] -datadog = ["datadog_lambda"] all = ["pydantic", "aws-xray-sdk", "fastjsonschema"] # allow customers to run code locally without emulators (SAM CLI, etc.) 
aws-sdk = ["boto3"] diff --git a/tests/functional/test_metrics.py b/tests/functional/test_metrics.py index 342f0cc1e50..fee3a3a9fc8 100644 --- a/tests/functional/test_metrics.py +++ b/tests/functional/test_metrics.py @@ -1,5 +1,4 @@ import json -import os import warnings from collections import namedtuple from typing import Any, Dict, List, Union @@ -23,8 +22,6 @@ ) from aws_lambda_powertools.metrics.provider import ( AmazonCloudWatchEMF, - DataDogMetrics, - DataDogProvider, MetricsBase, MetricsProviderBase, ) @@ -1358,92 +1355,3 @@ def lambda_handler(evt, context): # and specifically about the lack of Metrics with pytest.raises(SchemaValidationError, match="Must contain at least one metric."): lambda_handler({}, {}) - - -def test_datadog_coldstart(capsys): - reset_cold_start_flag_provider() - dd_provider = DataDogProvider(namespace="Serverlesspresso", flush_to_log=True) - metrics = DataDogMetrics(provider=dd_provider) - - LambdaContext = namedtuple("LambdaContext", "function_name") - - @metrics.log_metrics(capture_cold_start_metric=True, raise_on_empty_metrics=True) - def lambda_handler(event, context): - metrics.add_metric(name="item_sold", value=1, tags=["product:latte", "order:online"]) - - lambda_handler({}, LambdaContext("example_fn2")) - logs = capsys.readouterr().out.strip() - assert "ColdStart" in logs - - -def test_datadog_write_to_log(capsys): - os.environ["DD_FLUSH_TO_LOG"] = "True" - dd_provider = DataDogProvider(namespace="Serverlesspresso") - metrics = DataDogMetrics(provider=dd_provider) - metrics.add_metric(name="item_sold", value=1, tags=["product:latte", "order:online"]) - metrics.flush_metrics() - logs = capture_metrics_output(capsys) - logs["e"] = "" - assert logs == json.loads('{"m":"Serverlesspresso.item_sold","v":1,"e":"","t":["product:latte","order:online"]}') - - -def test_datadog_namespace(capsys): - dd_provider = DataDogProvider(namespace="Serverlesspresso", flush_to_log=True) - metrics = DataDogMetrics(provider=dd_provider) - - 
LambdaContext = namedtuple("LambdaContext", "function_name") - - @metrics.log_metrics(capture_cold_start_metric=True, raise_on_empty_metrics=True) - def lambda_handler(event, context): - metrics.add_metric(name="item_sold", value=1, tags=["product:latte", "order:online"]) - - lambda_handler({}, LambdaContext("example_fn")) - logs = capsys.readouterr().out.strip() - assert "Serverlesspresso" in logs - - -def test_datadog_raise_on_empty(): - dd_provider = DataDogProvider(namespace="Serverlesspresso", flush_to_log=True) - metrics = DataDogMetrics(provider=dd_provider) - - LambdaContext = namedtuple("LambdaContext", "function_name") - - @metrics.log_metrics(capture_cold_start_metric=False, raise_on_empty_metrics=True) - def lambda_handler(event, context): - pass - - with pytest.raises(SchemaValidationError, match="Must contain at least one metric."): - lambda_handler({}, LambdaContext("example_fn")) - - -def test_datadog_args(capsys): - dd_provider = DataDogProvider(namespace="Serverlesspresso", flush_to_log=True) - metrics = DataDogMetrics(provider=dd_provider) - metrics.add_metric("order_valve", 12.45, sales="sam") - metrics.flush_metrics() - logs = capsys.readouterr().out.strip() - log_dict = json.loads(logs) - tag_list = log_dict.get("t") - assert "sales:sam" in tag_list - - -def test_datadog_kwargs(capsys): - dd_provider = DataDogProvider(namespace="Serverlesspresso", flush_to_log=True) - metrics = DataDogMetrics(provider=dd_provider) - metrics.add_metric( - name="order_valve", - value=12.45, - tags=["test:kwargs"], - str="str", - int=123, - float=45.6, - dict={"type": "termination identified"}, - ) - metrics.flush_metrics() - logs = capsys.readouterr().out.strip() - log_dict = json.loads(logs) - tag_list = log_dict.get("t") - assert "test:kwargs" in tag_list - assert "str:str" in tag_list - assert "int:123" in tag_list - assert "float:45.6" in tag_list From bdaa73616fe9beef9b6698385b13c7acad9e0a9f Mon Sep 17 00:00:00 2001 From: Leandro Damascena Date: Thu, 27 Jul 
2023 14:42:14 +0100 Subject: [PATCH 25/32] refactoring: importing from typing_extensions --- aws_lambda_powertools/metrics/provider/base.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/aws_lambda_powertools/metrics/provider/base.py b/aws_lambda_powertools/metrics/provider/base.py index 83e7833a2be..bd0e5cf9abd 100644 --- a/aws_lambda_powertools/metrics/provider/base.py +++ b/aws_lambda_powertools/metrics/provider/base.py @@ -2,7 +2,9 @@ import functools import logging -from typing import Any, Callable, Dict, Optional, Protocol +from typing import Any, Callable, Dict, Optional + +from typing_extensions import Protocol logger = logging.getLogger(__name__) From 7ac42be83af06561d68c43521f9f4f82daf5262c Mon Sep 17 00:00:00 2001 From: Leandro Damascena Date: Fri, 28 Jul 2023 00:33:04 +0100 Subject: [PATCH 26/32] refactoring EMF provider --- aws_lambda_powertools/metrics/__init__.py | 9 +- aws_lambda_powertools/metrics/metric.py | 2 +- aws_lambda_powertools/metrics/metrics.py | 4 +- .../metrics/provider/__init__.py | 4 - .../metrics/provider/amazon_cloudwatch_emf.py | 790 +++++++++++++++++- .../metrics/provider/base.py | 4 +- tests/functional/test_metrics.py | 42 +- 7 files changed, 806 insertions(+), 49 deletions(-) diff --git a/aws_lambda_powertools/metrics/__init__.py b/aws_lambda_powertools/metrics/__init__.py index a8c1f7312d9..911cf85f544 100644 --- a/aws_lambda_powertools/metrics/__init__.py +++ b/aws_lambda_powertools/metrics/__init__.py @@ -1,18 +1,17 @@ """CloudWatch Embedded Metric Format utility """ -from aws_lambda_powertools.metrics.base import MetricResolution, MetricUnit from aws_lambda_powertools.metrics.exceptions import ( MetricResolutionError, MetricUnitError, MetricValueError, SchemaValidationError, ) -from aws_lambda_powertools.metrics.metric import single_metric from aws_lambda_powertools.metrics.provider.amazon_cloudwatch_emf import ( - AmazonCloudWatchEMF, - EphemeralAmazonCloudWatchEMF, EphemeralMetrics, + 
MetricResolution, Metrics, + MetricUnit, + single_metric, ) __all__ = [ @@ -25,6 +24,4 @@ "MetricResolutionError", "SchemaValidationError", "MetricValueError", - "AmazonCloudWatchEMF", - "EphemeralAmazonCloudWatchEMF", ] diff --git a/aws_lambda_powertools/metrics/metric.py b/aws_lambda_powertools/metrics/metric.py index e2ac49df489..4acd8b5eb4a 100644 --- a/aws_lambda_powertools/metrics/metric.py +++ b/aws_lambda_powertools/metrics/metric.py @@ -1,4 +1,4 @@ # NOTE: prevents circular inheritance import -from aws_lambda_powertools.metrics.base import SingleMetric, single_metric +from aws_lambda_powertools.metrics.provider.amazon_cloudwatch_emf import SingleMetric, single_metric __all__ = ["SingleMetric", "single_metric"] diff --git a/aws_lambda_powertools/metrics/metrics.py b/aws_lambda_powertools/metrics/metrics.py index ef85700da7e..4b3216000fa 100644 --- a/aws_lambda_powertools/metrics/metrics.py +++ b/aws_lambda_powertools/metrics/metrics.py @@ -1,9 +1,7 @@ # NOTE: keeps for compatibility from aws_lambda_powertools.metrics.provider.amazon_cloudwatch_emf import ( - AmazonCloudWatchEMF, - EphemeralAmazonCloudWatchEMF, EphemeralMetrics, Metrics, ) -__all__ = ["Metrics", "EphemeralMetrics", "AmazonCloudWatchEMF", "EphemeralAmazonCloudWatchEMF"] +__all__ = ["Metrics", "EphemeralMetrics"] diff --git a/aws_lambda_powertools/metrics/provider/__init__.py b/aws_lambda_powertools/metrics/provider/__init__.py index cecdf2de588..1ae7b79b09b 100644 --- a/aws_lambda_powertools/metrics/provider/__init__.py +++ b/aws_lambda_powertools/metrics/provider/__init__.py @@ -1,6 +1,4 @@ from aws_lambda_powertools.metrics.provider.amazon_cloudwatch_emf import ( - AmazonCloudWatchEMF, - EphemeralAmazonCloudWatchEMF, EphemeralMetrics, Metrics, ) @@ -10,7 +8,5 @@ "MetricsBase", "MetricsProviderBase", "Metrics", - "AmazonCloudWatchEMF", - "EphemeralAmazonCloudWatchEMF", "EphemeralMetrics", ] diff --git a/aws_lambda_powertools/metrics/provider/amazon_cloudwatch_emf.py 
b/aws_lambda_powertools/metrics/provider/amazon_cloudwatch_emf.py index 82639d863aa..66c54b57c47 100644 --- a/aws_lambda_powertools/metrics/provider/amazon_cloudwatch_emf.py +++ b/aws_lambda_powertools/metrics/provider/amazon_cloudwatch_emf.py @@ -1,11 +1,509 @@ from __future__ import annotations -from typing import Any, Dict +import datetime +import functools +import json +import logging +import numbers +import os +import warnings +from collections import defaultdict +from contextlib import contextmanager +from enum import Enum +from typing import Any, Callable, Dict, Generator, List, Optional, Union + +from aws_lambda_powertools.metrics.exceptions import ( + MetricResolutionError, + MetricUnitError, + MetricValueError, + SchemaValidationError, +) +from aws_lambda_powertools.metrics.provider.base import MetricsProviderBase +from aws_lambda_powertools.metrics.types import MetricNameUnitResolution +from aws_lambda_powertools.shared import constants +from aws_lambda_powertools.shared.functions import resolve_env_var_choice + +logger = logging.getLogger(__name__) + +MAX_METRICS = 100 +MAX_DIMENSIONS = 29 + +is_cold_start = True + + +class MetricResolution(Enum): + Standard = 60 + High = 1 + + +class MetricUnit(Enum): + Seconds = "Seconds" + Microseconds = "Microseconds" + Milliseconds = "Milliseconds" + Bytes = "Bytes" + Kilobytes = "Kilobytes" + Megabytes = "Megabytes" + Gigabytes = "Gigabytes" + Terabytes = "Terabytes" + Bits = "Bits" + Kilobits = "Kilobits" + Megabits = "Megabits" + Gigabits = "Gigabits" + Terabits = "Terabits" + Percent = "Percent" + Count = "Count" + BytesPerSecond = "Bytes/Second" + KilobytesPerSecond = "Kilobytes/Second" + MegabytesPerSecond = "Megabytes/Second" + GigabytesPerSecond = "Gigabytes/Second" + TerabytesPerSecond = "Terabytes/Second" + BitsPerSecond = "Bits/Second" + KilobitsPerSecond = "Kilobits/Second" + MegabitsPerSecond = "Megabits/Second" + GigabitsPerSecond = "Gigabits/Second" + TerabitsPerSecond = "Terabits/Second" + 
CountPerSecond = "Count/Second" + + +class AmazonCloudWatchEMFProvider(MetricsProviderBase): + """Base class for metric functionality (namespace, metric, dimension, serialization) + + MetricManager creates metrics asynchronously thanks to CloudWatch Embedded Metric Format (EMF). + CloudWatch EMF can create up to 100 metrics per EMF object + and metrics, dimensions, and namespace created via MetricManager + will adhere to the schema, will be serialized and validated against EMF Schema. + + **Use `aws_lambda_powertools.metrics.metrics.Metrics` or + `aws_lambda_powertools.metrics.metric.single_metric` to create EMF metrics.** -from aws_lambda_powertools.metrics.base import MetricManager + Environment variables + --------------------- + POWERTOOLS_METRICS_NAMESPACE : str + metric namespace to be set for all metrics + POWERTOOLS_SERVICE_NAME : str + service name used for default dimension + + Raises + ------ + MetricUnitError + When metric unit isn't supported by CloudWatch + MetricResolutionError + When metric resolution isn't supported by CloudWatch + MetricValueError + When metric value isn't a number + SchemaValidationError + When metric object fails EMF schema validation + """ + + def __init__( + self, + metric_set: Dict[str, Any] | None = None, + dimension_set: Dict | None = None, + namespace: str | None = None, + metadata_set: Dict[str, Any] | None = None, + service: str | None = None, + ): + self.metric_set = metric_set if metric_set is not None else {} + self.dimension_set = dimension_set if dimension_set is not None else {} + self.namespace = resolve_env_var_choice(choice=namespace, env=os.getenv(constants.METRICS_NAMESPACE_ENV)) + self.service = resolve_env_var_choice(choice=service, env=os.getenv(constants.SERVICE_NAME_ENV)) + self.metadata_set = metadata_set if metadata_set is not None else {} + self._metric_units = [unit.value for unit in MetricUnit] + self._metric_unit_valid_options = list(MetricUnit.__members__) + self._metric_resolutions = 
[resolution.value for resolution in MetricResolution] + + def add_metric( + self, + name: str, + unit: MetricUnit | str, + value: float, + resolution: MetricResolution | int = 60, + ) -> None: + """Adds given metric + + Example + ------- + **Add given metric using MetricUnit enum** + + metric.add_metric(name="BookingConfirmation", unit=MetricUnit.Count, value=1) + + **Add given metric using plain string as value unit** + + metric.add_metric(name="BookingConfirmation", unit="Count", value=1) + + **Add given metric with MetricResolution non default value** + + metric.add_metric(name="BookingConfirmation", unit="Count", value=1, resolution=MetricResolution.High) + + Parameters + ---------- + name : str + Metric name + unit : Union[MetricUnit, str] + `aws_lambda_powertools.helper.models.MetricUnit` + value : float + Metric value + resolution : Union[MetricResolution, int] + `aws_lambda_powertools.helper.models.MetricResolution` + + Raises + ------ + MetricUnitError + When metric unit is not supported by CloudWatch + MetricResolutionError + When metric resolution is not supported by CloudWatch + """ + if not isinstance(value, numbers.Number): + raise MetricValueError(f"{value} is not a valid number") + + unit = self._extract_metric_unit_value(unit=unit) + resolution = self._extract_metric_resolution_value(resolution=resolution) + metric: Dict = self.metric_set.get(name, defaultdict(list)) + metric["Unit"] = unit + metric["StorageResolution"] = resolution + metric["Value"].append(float(value)) + logger.debug(f"Adding metric: {name} with {metric}") + self.metric_set[name] = metric + + if len(self.metric_set) == MAX_METRICS or len(metric["Value"]) == MAX_METRICS: + logger.debug(f"Exceeded maximum of {MAX_METRICS} metrics - Publishing existing metric set") + metrics = self.serialize_metric_set() + print(json.dumps(metrics)) + + # clear metric set only as opposed to metrics and dimensions set + # since we could have more than 100 metrics + self.metric_set.clear() + + def 
serialize_metric_set( + self, + metrics: Dict | None = None, + dimensions: Dict | None = None, + metadata: Dict | None = None, + ) -> Dict: + """Serializes metric and dimensions set + + Parameters + ---------- + metrics : Dict, optional + Dictionary of metrics to serialize, by default None + dimensions : Dict, optional + Dictionary of dimensions to serialize, by default None + metadata: Dict, optional + Dictionary of metadata to serialize, by default None + + Example + ------- + **Serialize metrics into EMF format** + + metrics = MetricManager() + # ...add metrics, dimensions, namespace + ret = metrics.serialize_metric_set() + + Returns + ------- + Dict + Serialized metrics following EMF specification + + Raises + ------ + SchemaValidationError + Raised when serialization fail schema validation + """ + if metrics is None: # pragma: no cover + metrics = self.metric_set + + if dimensions is None: # pragma: no cover + dimensions = self.dimension_set + + if metadata is None: # pragma: no cover + metadata = self.metadata_set + + if self.service and not self.dimension_set.get("service"): + # self.service won't be a float + self.add_dimension(name="service", value=self.service) + + if len(metrics) == 0: + raise SchemaValidationError("Must contain at least one metric.") + + if self.namespace is None: + raise SchemaValidationError("Must contain a metric namespace.") + + logger.debug({"details": "Serializing metrics", "metrics": metrics, "dimensions": dimensions}) + + # For standard resolution metrics, don't add StorageResolution field to avoid unnecessary ingestion of data into cloudwatch # noqa E501 + # Example: [ { "Name": "metric_name", "Unit": "Count"} ] # noqa ERA001 + # + # In case using high-resolution metrics, add StorageResolution field + # Example: [ { "Name": "metric_name", "Unit": "Count", "StorageResolution": 1 } ] # noqa ERA001 + metric_definition: List[MetricNameUnitResolution] = [] + metric_names_and_values: Dict[str, float] = {} # { "metric_name": 1.0 } + + 
for metric_name in metrics: + metric: dict = metrics[metric_name] + metric_value: int = metric.get("Value", 0) + metric_unit: str = metric.get("Unit", "") + metric_resolution: int = metric.get("StorageResolution", 60) + + metric_definition_data: MetricNameUnitResolution = {"Name": metric_name, "Unit": metric_unit} + + # high-resolution metrics + if metric_resolution == 1: + metric_definition_data["StorageResolution"] = metric_resolution + + metric_definition.append(metric_definition_data) + + metric_names_and_values.update({metric_name: metric_value}) + + return { + "_aws": { + "Timestamp": int(datetime.datetime.now().timestamp() * 1000), # epoch + "CloudWatchMetrics": [ + { + "Namespace": self.namespace, # "test_namespace" + "Dimensions": [list(dimensions.keys())], # [ "service" ] + "Metrics": metric_definition, + }, + ], + }, + **dimensions, # "service": "test_service" + **metadata, # "username": "test" + **metric_names_and_values, # "single_metric": 1.0 + } + + def add_dimension(self, name: str, value: str) -> None: + """Adds given dimension to all metrics + + Example + ------- + **Add a metric dimensions** + + metric.add_dimension(name="operation", value="confirm_booking") + + Parameters + ---------- + name : str + Dimension name + value : str + Dimension value + """ + logger.debug(f"Adding dimension: {name}:{value}") + if len(self.dimension_set) == MAX_DIMENSIONS: + raise SchemaValidationError( + f"Maximum number of dimensions exceeded ({MAX_DIMENSIONS}): Unable to add dimension {name}.", + ) + # Cast value to str according to EMF spec + # Majority of values are expected to be string already, so + # checking before casting improves performance in most cases + self.dimension_set[name] = value if isinstance(value, str) else str(value) + + def add_metadata(self, key: str, value: Any) -> None: + """Adds high cardinal metadata for metrics object + + This will not be available during metrics visualization. + Instead, this will be searchable through logs. 
+ + If you're looking to add metadata to filter metrics, then + use add_dimensions method. + + Example + ------- + **Add metrics metadata** + + metric.add_metadata(key="booking_id", value="booking_id") + + Parameters + ---------- + key : str + Metadata key + value : any + Metadata value + """ + logger.debug(f"Adding metadata: {key}:{value}") + + # Cast key to str according to EMF spec + # Majority of keys are expected to be string already, so + # checking before casting improves performance in most cases + if isinstance(key, str): + self.metadata_set[key] = value + else: + self.metadata_set[str(key)] = value + + def clear_metrics(self) -> None: + logger.debug("Clearing out existing metric set from memory") + self.metric_set.clear() + self.dimension_set.clear() + self.metadata_set.clear() + + def flush_metrics(self, raise_on_empty_metrics: bool = False) -> None: + """Manually flushes the metrics. This is normally not necessary, + unless you're running on other runtimes besides Lambda, where the @log_metrics + decorator already handles things for you. + + Parameters + ---------- + raise_on_empty_metrics : bool, optional + raise exception if no metrics are emitted, by default False + """ + if not raise_on_empty_metrics and not self.metric_set: + warnings.warn( + "No application metrics to publish. The cold-start metric may be published if enabled. 
" + "If application metrics should never be empty, consider using 'raise_on_empty_metrics'", + stacklevel=2, + ) + else: + logger.debug("Flushing existing metrics") + metrics = self.serialize_metric_set() + print(json.dumps(metrics, separators=(",", ":"))) + self.clear_metrics() + + def log_metrics( + self, + lambda_handler: Callable[[Dict, Any], Any] | Optional[Callable[[Dict, Any, Optional[Dict]], Any]] = None, + capture_cold_start_metric: bool = False, + raise_on_empty_metrics: bool = False, + default_dimensions: Dict[str, str] | None = None, + ): + """Decorator to serialize and publish metrics at the end of a function execution. + + Be aware that the log_metrics **does call* the decorated function (e.g. lambda_handler). + + Example + ------- + **Lambda function using tracer and metrics decorators** + + from aws_lambda_powertools import Metrics, Tracer + + metrics = Metrics(service="payment") + tracer = Tracer(service="payment") + + @tracer.capture_lambda_handler + @metrics.log_metrics + def handler(event, context): + ... 
+ + Parameters + ---------- + lambda_handler : Callable[[Any, Any], Any], optional + lambda function handler, by default None + capture_cold_start_metric : bool, optional + captures cold start metric, by default False + raise_on_empty_metrics : bool, optional + raise exception if no metrics are emitted, by default False + default_dimensions: Dict[str, str], optional + metric dimensions as key=value that will always be present + + Raises + ------ + e + Propagate error received + """ + + # If handler is None we've been called with parameters + # Return a partial function with args filled + if lambda_handler is None: + logger.debug("Decorator called with parameters") + return functools.partial( + self.log_metrics, + capture_cold_start_metric=capture_cold_start_metric, + raise_on_empty_metrics=raise_on_empty_metrics, + default_dimensions=default_dimensions, + ) + + @functools.wraps(lambda_handler) + def decorate(event, context): + try: + if default_dimensions: + self.set_default_dimensions(**default_dimensions) + response = lambda_handler(event, context) + if capture_cold_start_metric: + self._add_cold_start_metric(context=context) + finally: + self.flush_metrics(raise_on_empty_metrics=raise_on_empty_metrics) + + return response + + return decorate + + def _extract_metric_resolution_value(self, resolution: Union[int, MetricResolution]) -> int: + """Return metric value from metric unit whether that's str or MetricResolution enum + + Parameters + ---------- + unit : Union[int, MetricResolution] + Metric resolution + Returns + ------- + int + Metric resolution value must be 1 or 60 + + Raises + ------ + MetricResolutionError + When metric resolution is not supported by CloudWatch + """ + if isinstance(resolution, MetricResolution): + return resolution.value + + if isinstance(resolution, int) and resolution in self._metric_resolutions: + return resolution + + raise MetricResolutionError( + f"Invalid metric resolution '{resolution}', expected either option: 
{self._metric_resolutions}", # noqa: E501 + ) -class Metrics(MetricManager): + def _extract_metric_unit_value(self, unit: Union[str, MetricUnit]) -> str: + """Return metric value from metric unit whether that's str or MetricUnit enum + + Parameters + ---------- + unit : Union[str, MetricUnit] + Metric unit + + Returns + ------- + str + Metric unit value (e.g. "Seconds", "Count/Second") + + Raises + ------ + MetricUnitError + When metric unit is not supported by CloudWatch + """ + + if isinstance(unit, str): + if unit in self._metric_unit_valid_options: + unit = MetricUnit[unit].value + + if unit not in self._metric_units: + raise MetricUnitError( + f"Invalid metric unit '{unit}', expected either option: {self._metric_unit_valid_options}", + ) + + if isinstance(unit, MetricUnit): + unit = unit.value + + return unit + + def _add_cold_start_metric(self, context: Any) -> None: + """Add cold start metric and function_name dimension + + Parameters + ---------- + context : Any + Lambda context + """ + global is_cold_start + if is_cold_start: + logger.debug("Adding cold start metric and function_name dimension") + with single_metric(name="ColdStart", unit=MetricUnit.Count, value=1, namespace=self.namespace) as metric: + metric.add_dimension(name="function_name", value=context.function_name) + if self.service: + metric.add_dimension(name="service", value=str(self.service)) + is_cold_start = False + + +class Metrics(AmazonCloudWatchEMFProvider): """Metrics create an EMF object with up to 100 metrics Use Metrics when you need to create multiple metrics that have @@ -71,21 +569,79 @@ def lambda_handler(): _metadata: Dict[str, Any] = {} _default_dimensions: Dict[str, Any] = {} - def __init__(self, service: str | None = None, namespace: str | None = None): + def __init__( + self, + service: str | None = None, + namespace: str | None = None, + provider: AmazonCloudWatchEMFProvider | None = None, + ): self.metric_set = self._metrics self.metadata_set = self._metadata 
self.default_dimensions = self._default_dimensions self.dimension_set = self._dimensions self.dimension_set.update(**self._default_dimensions) - super().__init__( - namespace=namespace, - service=service, - metric_set=self.metric_set, - dimension_set=self.dimension_set, - metadata_set=self.metadata_set, + + if provider is None: + self.provider = AmazonCloudWatchEMFProvider( + namespace=namespace, + service=service, + metric_set=self.metric_set, + dimension_set=self.dimension_set, + metadata_set=self.metadata_set, + ) + else: + self.provider = provider + + def add_metric( + self, + name: str, + unit: MetricUnit | str, + value: float, + resolution: MetricResolution | int = 60, + ) -> None: + self.provider.add_metric(name=name, unit=unit, value=value, resolution=resolution) + + def add_dimension(self, name: str, value: str) -> None: + self.provider.add_dimension(name=name, value=value) + + def serialize_metric_set( + self, + metrics: Dict | None = None, + dimensions: Dict | None = None, + metadata: Dict | None = None, + ) -> Dict: + return self.provider.serialize_metric_set(metrics=metrics, dimensions=dimensions, metadata=metadata) + + def add_metadata(self, key: str, value: Any) -> None: + self.provider.add_metadata(key=key, value=value) + + def flush_metrics(self, raise_on_empty_metrics: bool = False) -> None: + self.provider.flush_metrics(raise_on_empty_metrics=raise_on_empty_metrics) + + def log_metrics( + self, + lambda_handler: Callable[[Dict, Any], Any] | Optional[Callable[[Dict, Any, Optional[Dict]], Any]] = None, + capture_cold_start_metric: bool = False, + raise_on_empty_metrics: bool = False, + default_dimensions: Dict[str, str] | None = None, + ): + return self.provider.log_metrics( + lambda_handler=lambda_handler, + capture_cold_start_metric=capture_cold_start_metric, + raise_on_empty_metrics=raise_on_empty_metrics, + default_dimensions=default_dimensions, ) + def _extract_metric_resolution_value(self, resolution: Union[int, MetricResolution]) -> int: + 
return self.provider._extract_metric_resolution_value(resolution=resolution) + + def _extract_metric_unit_value(self, unit: Union[str, MetricUnit]) -> str: + return self.provider._extract_metric_unit_value(unit=unit) + + def _add_cold_start_metric(self, context: Any) -> None: + self.provider._add_cold_start_metric(context=context) + def set_default_dimensions(self, **dimensions) -> None: """Persist dimensions across Lambda invocations @@ -116,16 +672,12 @@ def clear_default_dimensions(self) -> None: self.default_dimensions.clear() def clear_metrics(self) -> None: - super().clear_metrics() + self.provider.clear_metrics() # re-add default dimensions self.set_default_dimensions(**self.default_dimensions) -# add alias for original EMF format to make the provider more explicit -AmazonCloudWatchEMF = Metrics - - -class EphemeralMetrics(MetricManager): +class EphemeralMetrics(AmazonCloudWatchEMFProvider): """Non-singleton version of Metrics to not persist metrics across instances NOTE: This is useful when you want to: @@ -137,12 +689,70 @@ class EphemeralMetrics(MetricManager): _dimensions: Dict[str, str] = {} _default_dimensions: Dict[str, Any] = {} - def __init__(self, service: str | None = None, namespace: str | None = None): + def __init__( + self, + service: str | None = None, + namespace: str | None = None, + provider: AmazonCloudWatchEMFProvider | None = None, + ): self.default_dimensions = self._default_dimensions self.dimension_set = self._dimensions self.dimension_set.update(**self._default_dimensions) - super().__init__(namespace=namespace, service=service) + + if provider is None: + self.provider = AmazonCloudWatchEMFProvider(namespace=namespace, service=service) + else: + self.provider = provider + + def add_metric( + self, + name: str, + unit: MetricUnit | str, + value: float, + resolution: MetricResolution | int = 60, + ) -> None: + return self.provider.add_metric(name=name, unit=unit, value=value, resolution=resolution) + + def add_dimension(self, name: 
str, value: str) -> None: + return self.provider.add_dimension(name=name, value=value) + + def serialize_metric_set( + self, + metrics: Dict | None = None, + dimensions: Dict | None = None, + metadata: Dict | None = None, + ) -> Dict: + return self.provider.serialize_metric_set(metrics=metrics, dimensions=dimensions, metadata=metadata) + + def add_metadata(self, key: str, value: Any) -> None: + self.provider.add_metadata(key=key, value=value) + + def flush_metrics(self, raise_on_empty_metrics: bool = False) -> None: + self.provider.flush_metrics(raise_on_empty_metrics=raise_on_empty_metrics) + + def log_metrics( + self, + lambda_handler: Callable[[Dict, Any], Any] | Optional[Callable[[Dict, Any, Optional[Dict]], Any]] = None, + capture_cold_start_metric: bool = False, + raise_on_empty_metrics: bool = False, + default_dimensions: Dict[str, str] | None = None, + ): + return self.provider.log_metrics( + lambda_handler=lambda_handler, + capture_cold_start_metric=capture_cold_start_metric, + raise_on_empty_metrics=raise_on_empty_metrics, + default_dimensions=default_dimensions, + ) + + def _extract_metric_resolution_value(self, resolution: Union[int, MetricResolution]) -> int: + return self.provider._extract_metric_resolution_value(resolution=resolution) + + def _extract_metric_unit_value(self, unit: Union[str, MetricUnit]) -> str: + return self.provider._extract_metric_unit_value(unit=unit) + + def _add_cold_start_metric(self, context: Any) -> None: + self.provider._add_cold_start_metric(context=context) def set_default_dimensions(self, **dimensions) -> None: """Persist dimensions across Lambda invocations @@ -174,9 +784,149 @@ def clear_default_dimensions(self) -> None: self.default_dimensions.clear() def clear_metrics(self) -> None: - super().clear_metrics() + self.provider.clear_metrics() # re-add default dimensions self.set_default_dimensions(**self.default_dimensions) -EphemeralAmazonCloudWatchEMF = EphemeralMetrics +class 
SingleMetric(AmazonCloudWatchEMFProvider): + """SingleMetric creates an EMF object with a single metric. + + EMF specification doesn't allow metrics with different dimensions. + SingleMetric overrides MetricManager's add_metric method to do just that. + + Use `single_metric` when you need to create metrics with different dimensions, + otherwise `aws_lambda_powertools.metrics.metrics.Metrics` is + a more cost effective option + + Environment variables + --------------------- + POWERTOOLS_METRICS_NAMESPACE : str + metric namespace + + Example + ------- + **Creates cold start metric with function_version as dimension** + + import json + from aws_lambda_powertools.metrics import single_metric, MetricUnit, MetricResolution + metric = single_metric(namespace="ServerlessAirline") + + metric.add_metric(name="ColdStart", unit=MetricUnit.Count, value=1, resolution=MetricResolution.Standard) + metric.add_dimension(name="function_version", value=47) + + print(json.dumps(metric.serialize_metric_set(), indent=4)) + + Parameters + ---------- + MetricManager : MetricManager + Inherits from `aws_lambda_powertools.metrics.base.MetricManager` + """ + + def add_metric( + self, + name: str, + unit: MetricUnit | str, + value: float, + resolution: MetricResolution | int = 60, + ) -> None: + """Method to prevent more than one metric being created + + Parameters + ---------- + name : str + Metric name (e.g. BookingConfirmation) + unit : MetricUnit + Metric unit (e.g. "Seconds", MetricUnit.Seconds) + value : float + Metric value + resolution : MetricResolution + Metric resolution (e.g. 
60, MetricResolution.Standard) + """ + if len(self.metric_set) > 0: + logger.debug(f"Metric {name} already set, skipping...") + return + return super().add_metric(name, unit, value, resolution) + + +@contextmanager +def single_metric( + name: str, + unit: MetricUnit, + value: float, + resolution: MetricResolution | int = 60, + namespace: str | None = None, + default_dimensions: Dict[str, str] | None = None, +) -> Generator[SingleMetric, None, None]: + """Context manager to simplify creation of a single metric + + Example + ------- + **Creates cold start metric with function_version as dimension** + + from aws_lambda_powertools import single_metric + from aws_lambda_powertools.metrics import MetricUnit + from aws_lambda_powertools.metrics import MetricResolution + + with single_metric(name="ColdStart", unit=MetricUnit.Count, value=1, resolution=MetricResolution.Standard, namespace="ServerlessAirline") as metric: + metric.add_dimension(name="function_version", value="47") + + **Same as above but set namespace using environment variable** + + $ export POWERTOOLS_METRICS_NAMESPACE="ServerlessAirline" + + from aws_lambda_powertools import single_metric + from aws_lambda_powertools.metrics import MetricUnit + from aws_lambda_powertools.metrics import MetricResolution + + with single_metric(name="ColdStart", unit=MetricUnit.Count, value=1, resolution=MetricResolution.Standard) as metric: + metric.add_dimension(name="function_version", value="47") + + Parameters + ---------- + name : str + Metric name + unit : MetricUnit + `aws_lambda_powertools.helper.models.MetricUnit` + resolution : MetricResolution + `aws_lambda_powertools.helper.models.MetricResolution` + value : float + Metric value + namespace: str + Namespace for metrics + + Yields + ------- + SingleMetric + SingleMetric class instance + + Raises + ------ + MetricUnitError + When metric metric isn't supported by CloudWatch + MetricResolutionError + When metric resolution isn't supported by CloudWatch + 
MetricValueError + When metric value isn't a number + SchemaValidationError + When metric object fails EMF schema validation + """ # noqa: E501 + metric_set: Dict | None = None + try: + metric: SingleMetric = SingleMetric(namespace=namespace) + metric.add_metric(name=name, unit=unit, value=value, resolution=resolution) + + if default_dimensions: + for dim_name, dim_value in default_dimensions.items(): + metric.add_dimension(name=dim_name, value=dim_value) + + yield metric + metric_set = metric.serialize_metric_set() + finally: + print(json.dumps(metric_set, separators=(",", ":"))) + + +def reset_cold_start_flag(): + global is_cold_start + if not is_cold_start: + is_cold_start = True diff --git a/aws_lambda_powertools/metrics/provider/base.py b/aws_lambda_powertools/metrics/provider/base.py index bd0e5cf9abd..12cc74d028b 100644 --- a/aws_lambda_powertools/metrics/provider/base.py +++ b/aws_lambda_powertools/metrics/provider/base.py @@ -49,7 +49,7 @@ def add_metric(self, *args: Any, **kwargs: Any) -> Any: """ raise NotImplementedError - def serialize(self, *args: Any, **kwargs: Any) -> Any: + def serialize_metric_set(self, *args: Any, **kwargs: Any) -> Any: """ Abstract method for serialize a metric. @@ -75,7 +75,7 @@ def serialize(self, *args: Any, **kwargs: Any) -> Any: raise NotImplementedError # flush serialized data to output, or send to API directly - def flush(self, *args: Any, **kwargs) -> Any: + def flush_metrics(self, *args: Any, **kwargs) -> Any: """ Abstract method for flushing a metric. 
diff --git a/tests/functional/test_metrics.py b/tests/functional/test_metrics.py index fee3a3a9fc8..055de54db2b 100644 --- a/tests/functional/test_metrics.py +++ b/tests/functional/test_metrics.py @@ -15,16 +15,15 @@ MetricValueError, SchemaValidationError, ) -from aws_lambda_powertools.metrics.base import ( - MAX_DIMENSIONS, - MetricManager, - reset_cold_start_flag, -) from aws_lambda_powertools.metrics.provider import ( - AmazonCloudWatchEMF, MetricsBase, MetricsProviderBase, ) +from aws_lambda_powertools.metrics.provider.amazon_cloudwatch_emf import ( + MAX_DIMENSIONS, + AmazonCloudWatchEMFProvider, + reset_cold_start_flag, +) from aws_lambda_powertools.metrics.provider.base import reset_cold_start_flag_provider @@ -116,7 +115,7 @@ def serialize_metrics( metadatas: List[Dict] = None, ) -> Dict: """Helper function to build EMF object from a list of metrics, dimensions""" - my_metrics = MetricManager(namespace=namespace) + my_metrics = AmazonCloudWatchEMFProvider(namespace=namespace) for dimension in dimensions: my_metrics.add_dimension(**dimension) @@ -133,7 +132,7 @@ def serialize_metrics( def serialize_single_metric(metric: Dict, dimension: Dict, namespace: str, metadata: Dict = None) -> Dict: """Helper function to build EMF object from a given metric, dimension and namespace""" - my_metrics = MetricManager(namespace=namespace) + my_metrics = AmazonCloudWatchEMFProvider(namespace=namespace) my_metrics.add_metric(**metric) my_metrics.add_dimension(**dimension) @@ -234,6 +233,27 @@ def test_single_metric_default_dimensions_inherit(capsys, metric, dimension, nam assert expected == output +def test_log_metrics_preconfigured_provider(capsys, metrics, dimensions, namespace): + # GIVEN Metrics is initialized + provider = AmazonCloudWatchEMFProvider(namespace=namespace) + my_metrics = Metrics(provider=provider) + for metric in metrics: + my_metrics.add_metric(**metric) + for dimension in dimensions: + my_metrics.add_dimension(**dimension) + + # WHEN we manually the 
metrics + my_metrics.flush_metrics() + + output = capture_metrics_output(capsys) + expected = serialize_metrics(metrics=metrics, dimensions=dimensions, namespace=namespace) + + # THEN we should have no exceptions + # and a valid EMF object should be flushed correctly + remove_timestamp(metrics=[output, expected]) + assert expected == output + + def test_log_metrics(capsys, metrics, dimensions, namespace): # GIVEN Metrics is initialized my_metrics = Metrics(namespace=namespace) @@ -1016,7 +1036,7 @@ def test_metric_manage_metadata_set(): expected_dict = {"setting": "On"} try: - metric = MetricManager(metadata_set=expected_dict) + metric = AmazonCloudWatchEMFProvider(metadata_set=expected_dict) assert metric.metadata_set == expected_dict except AttributeError: pytest.fail("AttributeError should not be raised") @@ -1267,10 +1287,6 @@ def add_cold_start_metric(self, metric_name: str, function_name: str) -> None: return MetricsClass -def test_cloudwatch_emf(namespace): - assert AmazonCloudWatchEMF == Metrics - - def test_metrics_provider_basic(capsys, metrics_provider, metric): provider = metrics_provider() provider.add_metric(**metric) From 294bd8538435cc834e588adb9190ca2a200fe07f Mon Sep 17 00:00:00 2001 From: Leandro Damascena Date: Fri, 28 Jul 2023 16:46:29 +0100 Subject: [PATCH 27/32] refactoring cloudwatchemf provider and cleaning code --- aws_lambda_powertools/metrics/__init__.py | 17 +- aws_lambda_powertools/metrics/base.py | 53 +- aws_lambda_powertools/metrics/exceptions.py | 14 +- aws_lambda_powertools/metrics/metrics.py | 308 +++++++++++- .../metrics/provider/__init__.py | 6 - .../provider/cloudwatch_emf/__init__.py | 0 .../cloudwatch.py} | 470 +----------------- .../provider/cloudwatch_emf/cold_start.py | 9 + .../provider/cloudwatch_emf/constants.py | 2 + .../provider/cloudwatch_emf/exceptions.py | 10 + .../cloudwatch_emf/metric_properties.py | 37 ++ ruff.toml | 2 + tests/functional/test_metrics.py | 34 +- 13 files changed, 426 insertions(+), 536 
deletions(-) create mode 100644 aws_lambda_powertools/metrics/provider/cloudwatch_emf/__init__.py rename aws_lambda_powertools/metrics/provider/{amazon_cloudwatch_emf.py => cloudwatch_emf/cloudwatch.py} (52%) create mode 100644 aws_lambda_powertools/metrics/provider/cloudwatch_emf/cold_start.py create mode 100644 aws_lambda_powertools/metrics/provider/cloudwatch_emf/constants.py create mode 100644 aws_lambda_powertools/metrics/provider/cloudwatch_emf/exceptions.py create mode 100644 aws_lambda_powertools/metrics/provider/cloudwatch_emf/metric_properties.py diff --git a/aws_lambda_powertools/metrics/__init__.py b/aws_lambda_powertools/metrics/__init__.py index 911cf85f544..b8c94478816 100644 --- a/aws_lambda_powertools/metrics/__init__.py +++ b/aws_lambda_powertools/metrics/__init__.py @@ -1,27 +1,22 @@ """CloudWatch Embedded Metric Format utility """ +from aws_lambda_powertools.metrics.base import MetricResolution, MetricUnit, single_metric from aws_lambda_powertools.metrics.exceptions import ( MetricResolutionError, MetricUnitError, MetricValueError, SchemaValidationError, ) -from aws_lambda_powertools.metrics.provider.amazon_cloudwatch_emf import ( - EphemeralMetrics, - MetricResolution, - Metrics, - MetricUnit, - single_metric, -) +from aws_lambda_powertools.metrics.metrics import EphemeralMetrics, Metrics __all__ = [ - "Metrics", - "EphemeralMetrics", "single_metric", - "MetricUnit", "MetricUnitError", - "MetricResolution", "MetricResolutionError", "SchemaValidationError", "MetricValueError", + "Metrics", + "EphemeralMetrics", + "MetricResolution", + "MetricUnit", ] diff --git a/aws_lambda_powertools/metrics/base.py b/aws_lambda_powertools/metrics/base.py index 27d76a46939..b32421431cd 100644 --- a/aws_lambda_powertools/metrics/base.py +++ b/aws_lambda_powertools/metrics/base.py @@ -9,7 +9,6 @@ import warnings from collections import defaultdict from contextlib import contextmanager -from enum import Enum from typing import Any, Callable, Dict, Generator, List, 
Optional, Union from aws_lambda_powertools.metrics.exceptions import ( @@ -18,50 +17,20 @@ MetricValueError, SchemaValidationError, ) +from aws_lambda_powertools.metrics.provider.cloudwatch_emf import cold_start +from aws_lambda_powertools.metrics.provider.cloudwatch_emf.cold_start import ( + reset_cold_start_flag, # noqa: F401 # backwards compatibility +) +from aws_lambda_powertools.metrics.provider.cloudwatch_emf.constants import MAX_DIMENSIONS, MAX_METRICS +from aws_lambda_powertools.metrics.provider.cloudwatch_emf.metric_properties import MetricResolution, MetricUnit from aws_lambda_powertools.metrics.types import MetricNameUnitResolution from aws_lambda_powertools.shared import constants from aws_lambda_powertools.shared.functions import resolve_env_var_choice logger = logging.getLogger(__name__) -MAX_METRICS = 100 -MAX_DIMENSIONS = 29 - -is_cold_start = True - - -class MetricResolution(Enum): - Standard = 60 - High = 1 - - -class MetricUnit(Enum): - Seconds = "Seconds" - Microseconds = "Microseconds" - Milliseconds = "Milliseconds" - Bytes = "Bytes" - Kilobytes = "Kilobytes" - Megabytes = "Megabytes" - Gigabytes = "Gigabytes" - Terabytes = "Terabytes" - Bits = "Bits" - Kilobits = "Kilobits" - Megabits = "Megabits" - Gigabits = "Gigabits" - Terabits = "Terabits" - Percent = "Percent" - Count = "Count" - BytesPerSecond = "Bytes/Second" - KilobytesPerSecond = "Kilobytes/Second" - MegabytesPerSecond = "Megabytes/Second" - GigabytesPerSecond = "Gigabytes/Second" - TerabytesPerSecond = "Terabytes/Second" - BitsPerSecond = "Bits/Second" - KilobitsPerSecond = "Kilobits/Second" - MegabitsPerSecond = "Megabits/Second" - GigabitsPerSecond = "Gigabits/Second" - TerabitsPerSecond = "Terabits/Second" - CountPerSecond = "Count/Second" +# Maintenance: alias due to Hyrum's law +is_cold_start = cold_start.is_cold_start class MetricManager: @@ -637,9 +606,3 @@ def single_metric( metric_set = metric.serialize_metric_set() finally: print(json.dumps(metric_set, separators=(",", 
":"))) - - -def reset_cold_start_flag(): - global is_cold_start - if not is_cold_start: - is_cold_start = True diff --git a/aws_lambda_powertools/metrics/exceptions.py b/aws_lambda_powertools/metrics/exceptions.py index 94f492d14d7..30a4996d67e 100644 --- a/aws_lambda_powertools/metrics/exceptions.py +++ b/aws_lambda_powertools/metrics/exceptions.py @@ -1,13 +1,4 @@ -class MetricUnitError(Exception): - """When metric unit is not supported by CloudWatch""" - - pass - - -class MetricResolutionError(Exception): - """When metric resolution is not supported by CloudWatch""" - - pass +from aws_lambda_powertools.metrics.provider.cloudwatch_emf.exceptions import MetricResolutionError, MetricUnitError class SchemaValidationError(Exception): @@ -20,3 +11,6 @@ class MetricValueError(Exception): """When metric value isn't a valid number""" pass + + +__all__ = ["MetricUnitError", "MetricResolutionError", "SchemaValidationError", "MetricValueError"] diff --git a/aws_lambda_powertools/metrics/metrics.py b/aws_lambda_powertools/metrics/metrics.py index 4b3216000fa..74c0a2f30c9 100644 --- a/aws_lambda_powertools/metrics/metrics.py +++ b/aws_lambda_powertools/metrics/metrics.py @@ -1,7 +1,305 @@ # NOTE: keeps for compatibility -from aws_lambda_powertools.metrics.provider.amazon_cloudwatch_emf import ( - EphemeralMetrics, - Metrics, -) +from __future__ import annotations -__all__ = ["Metrics", "EphemeralMetrics"] +from typing import Any, Callable, Dict, Optional, Union + +from aws_lambda_powertools.metrics.base import MetricResolution, MetricUnit +from aws_lambda_powertools.metrics.provider.cloudwatch_emf.cloudwatch import AmazonCloudWatchEMFProvider + + +class Metrics: + """Metrics create an EMF object with up to 100 metrics + + Use Metrics when you need to create multiple metrics that have + dimensions in common (e.g. service_name="payment"). + + Metrics up to 100 metrics in memory and are shared across + all its instances. 
That means it can be safely instantiated outside + of a Lambda function, or anywhere else. + + A decorator (log_metrics) is provided so metrics are published at the end of its execution. + If more than 100 metrics are added at a given function execution, + these metrics are serialized and published before adding a given metric + to prevent metric truncation. + + Example + ------- + **Creates a few metrics and publish at the end of a function execution** + + from aws_lambda_powertools import Metrics + + metrics = Metrics(namespace="ServerlessAirline", service="payment") + + @metrics.log_metrics(capture_cold_start_metric=True) + def lambda_handler(): + metrics.add_metric(name="BookingConfirmation", unit="Count", value=1) + metrics.add_dimension(name="function_version", value="$LATEST") + + return True + + Environment variables + --------------------- + POWERTOOLS_METRICS_NAMESPACE : str + metric namespace + POWERTOOLS_SERVICE_NAME : str + service name used for default dimension + + Parameters + ---------- + service : str, optional + service name to be used as metric dimension, by default "service_undefined" + namespace : str, optional + Namespace for metrics + + Raises + ------ + MetricUnitError + When metric unit isn't supported by CloudWatch + MetricResolutionError + When metric resolution isn't supported by CloudWatch + MetricValueError + When metric value isn't a number + SchemaValidationError + When metric object fails EMF schema validation + """ + + # NOTE: We use class attrs to share metrics data across instances + # this allows customers to initialize Metrics() throughout their code base (and middlewares) + # and not get caught by accident with metrics data loss, or data deduplication + # e.g., m1 and m2 add metric ProductCreated, however m1 has 'version' dimension but m2 doesn't + # Result: ProductCreated is created twice as we now have 2 different EMF blobs + _metrics: Dict[str, Any] = {} + _dimensions: Dict[str, str] = {} + _metadata: Dict[str, Any] = {} + 
_default_dimensions: Dict[str, Any] = {} + + def __init__( + self, + service: str | None = None, + namespace: str | None = None, + provider: AmazonCloudWatchEMFProvider | None = None, + ): + self.metric_set = self._metrics + self.metadata_set = self._metadata + self.default_dimensions = self._default_dimensions + self.dimension_set = self._dimensions + + self.dimension_set.update(**self._default_dimensions) + + if provider is None: + self.provider = AmazonCloudWatchEMFProvider( + namespace=namespace, + service=service, + metric_set=self.metric_set, + dimension_set=self.dimension_set, + metadata_set=self.metadata_set, + default_dimensions=self._default_dimensions, + ) + else: + self.provider = provider + + def add_metric( + self, + name: str, + unit: MetricUnit | str, + value: float, + resolution: MetricResolution | int = 60, + ) -> None: + self.provider.add_metric(name=name, unit=unit, value=value, resolution=resolution) + + def add_dimension(self, name: str, value: str) -> None: + self.provider.add_dimension(name=name, value=value) + + def serialize_metric_set( + self, + metrics: Dict | None = None, + dimensions: Dict | None = None, + metadata: Dict | None = None, + ) -> Dict: + return self.provider.serialize_metric_set(metrics=metrics, dimensions=dimensions, metadata=metadata) + + def add_metadata(self, key: str, value: Any) -> None: + self.provider.add_metadata(key=key, value=value) + + def flush_metrics(self, raise_on_empty_metrics: bool = False) -> None: + self.provider.flush_metrics(raise_on_empty_metrics=raise_on_empty_metrics) + + def log_metrics( + self, + lambda_handler: Callable[[Dict, Any], Any] | Optional[Callable[[Dict, Any, Optional[Dict]], Any]] = None, + capture_cold_start_metric: bool = False, + raise_on_empty_metrics: bool = False, + default_dimensions: Dict[str, str] | None = None, + ): + return self.provider.log_metrics( + lambda_handler=lambda_handler, + capture_cold_start_metric=capture_cold_start_metric, + 
raise_on_empty_metrics=raise_on_empty_metrics, + default_dimensions=default_dimensions, + ) + + def _extract_metric_resolution_value(self, resolution: Union[int, MetricResolution]) -> int: + return self.provider._extract_metric_resolution_value(resolution=resolution) + + def _extract_metric_unit_value(self, unit: Union[str, MetricUnit]) -> str: + return self.provider._extract_metric_unit_value(unit=unit) + + def _add_cold_start_metric(self, context: Any) -> None: + self.provider._add_cold_start_metric(context=context) + + def set_default_dimensions(self, **dimensions) -> None: + """Persist dimensions across Lambda invocations + + Parameters + ---------- + dimensions : Dict[str, Any], optional + metric dimensions as key=value + + Example + ------- + **Sets some default dimensions that will always be present across metrics and invocations** + + from aws_lambda_powertools import Metrics + + metrics = Metrics(namespace="ServerlessAirline", service="payment") + metrics.set_default_dimensions(environment="demo", another="one") + + @metrics.log_metrics() + def lambda_handler(): + return True + """ + for name, value in dimensions.items(): + self.add_dimension(name, value) + + self.default_dimensions.update(**dimensions) + + def clear_default_dimensions(self) -> None: + self.default_dimensions.clear() + + def clear_metrics(self) -> None: + self.provider.clear_metrics() + + +# Maintenance: until v3, we can't afford to break customers. +# AmazonCloudWatchEMFProvider has the exact same functionality (non-singleton) +# so we simply alias. 
If a customer subclassed `EphemeralMetrics` and somehow relied on __name__ +# we can quickly revert and duplicate code while using self.provider + +EphemeralMetrics = AmazonCloudWatchEMFProvider + +# noqa: ERA001 +# class EphemeralMetrics(MetricManager): +# """Non-singleton version of Metrics to not persist metrics across instances +# +# NOTE: This is useful when you want to: +# +# - Create metrics for distinct namespaces +# - Create the same metrics with different dimensions more than once +# """ +# +# # _dimensions: Dict[str, str] = {} +# _default_dimensions: Dict[str, Any] = {} +# +# def __init__( +# self, +# service: str | None = None, +# namespace: str | None = None, +# provider: AmazonCloudWatchEMFProvider | None = None, +# ): +# super().__init__(namespace=namespace, service=service) +# +# self.default_dimensions = self._default_dimensions +# # # self.dimension_set = self._dimensions +# # self.dimension_set.update(**self._default_dimensions) +# +# self.provider = provider or AmazonCloudWatchEMFProvider( +# namespace=namespace, +# service=service, +# metric_set=self.metric_set, +# metadata_set=self.metadata_set, +# dimension_set=self.dimension_set, +# default_dimensions=self._default_dimensions, +# ) +# +# def add_metric( +# self, +# name: str, +# unit: MetricUnit | str, +# value: float, +# resolution: MetricResolution | int = 60, +# ) -> None: +# return self.provider.add_metric(name=name, unit=unit, value=value, resolution=resolution) +# +# def add_dimension(self, name: str, value: str) -> None: +# return self.provider.add_dimension(name=name, value=value) +# +# def serialize_metric_set( +# self, +# metrics: Dict | None = None, +# dimensions: Dict | None = None, +# metadata: Dict | None = None, +# ) -> Dict: +# return self.provider.serialize_metric_set(metrics=metrics, dimensions=dimensions, metadata=metadata) +# +# def add_metadata(self, key: str, value: Any) -> None: +# self.provider.add_metadata(key=key, value=value) +# +# def flush_metrics(self, 
raise_on_empty_metrics: bool = False) -> None: +# self.provider.flush_metrics(raise_on_empty_metrics=raise_on_empty_metrics) +# +# def log_metrics( +# self, +# lambda_handler: Callable[[Dict, Any], Any] | Optional[Callable[[Dict, Any, Optional[Dict]], Any]] = None, +# capture_cold_start_metric: bool = False, +# raise_on_empty_metrics: bool = False, +# default_dimensions: Dict[str, str] | None = None, +# ): +# return self.provider.log_metrics( +# lambda_handler=lambda_handler, +# capture_cold_start_metric=capture_cold_start_metric, +# raise_on_empty_metrics=raise_on_empty_metrics, +# default_dimensions=default_dimensions, +# ) +# +# def _extract_metric_resolution_value(self, resolution: Union[int, MetricResolution]) -> int: +# return self.provider._extract_metric_resolution_value(resolution=resolution) +# +# def _extract_metric_unit_value(self, unit: Union[str, MetricUnit]) -> str: +# return self.provider._extract_metric_unit_value(unit=unit) +# +# def _add_cold_start_metric(self, context: Any) -> None: +# return self.provider._add_cold_start_metric(context=context) +# +# def set_default_dimensions(self, **dimensions) -> None: +# """Persist dimensions across Lambda invocations +# +# Parameters +# ---------- +# dimensions : Dict[str, Any], optional +# metric dimensions as key=value +# +# Example +# ------- +# **Sets some default dimensions that will always be present across metrics and invocations** +# +# from aws_lambda_powertools import Metrics +# +# metrics = Metrics(namespace="ServerlessAirline", service="payment") +# metrics.set_default_dimensions(environment="demo", another="one") +# +# @metrics.log_metrics() +# def lambda_handler(): +# return True +# """ +# return self.provider.set_default_dimensions(**dimensions) +# +# def clear_default_dimensions(self) -> None: +# self.default_dimensions.clear() +# +# def clear_metrics(self) -> None: +# self.provider.clear_metrics() +# # re-add default dimensions +# self.set_default_dimensions(**self.default_dimensions) +# + 
+# __all__ = [] diff --git a/aws_lambda_powertools/metrics/provider/__init__.py b/aws_lambda_powertools/metrics/provider/__init__.py index 1ae7b79b09b..814812c135b 100644 --- a/aws_lambda_powertools/metrics/provider/__init__.py +++ b/aws_lambda_powertools/metrics/provider/__init__.py @@ -1,12 +1,6 @@ -from aws_lambda_powertools.metrics.provider.amazon_cloudwatch_emf import ( - EphemeralMetrics, - Metrics, -) from aws_lambda_powertools.metrics.provider.base import MetricsBase, MetricsProviderBase __all__ = [ "MetricsBase", "MetricsProviderBase", - "Metrics", - "EphemeralMetrics", ] diff --git a/aws_lambda_powertools/metrics/provider/cloudwatch_emf/__init__.py b/aws_lambda_powertools/metrics/provider/cloudwatch_emf/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/aws_lambda_powertools/metrics/provider/amazon_cloudwatch_emf.py b/aws_lambda_powertools/metrics/provider/cloudwatch_emf/cloudwatch.py similarity index 52% rename from aws_lambda_powertools/metrics/provider/amazon_cloudwatch_emf.py rename to aws_lambda_powertools/metrics/provider/cloudwatch_emf/cloudwatch.py index 66c54b57c47..921fcee6045 100644 --- a/aws_lambda_powertools/metrics/provider/amazon_cloudwatch_emf.py +++ b/aws_lambda_powertools/metrics/provider/cloudwatch_emf/cloudwatch.py @@ -8,62 +8,24 @@ import os import warnings from collections import defaultdict -from contextlib import contextmanager -from enum import Enum -from typing import Any, Callable, Dict, Generator, List, Optional, Union - -from aws_lambda_powertools.metrics.exceptions import ( +from typing import Any, Callable, Dict, List, Optional, Union + +from aws_lambda_powertools.metrics.base import single_metric +from aws_lambda_powertools.metrics.exceptions import MetricValueError, SchemaValidationError +from aws_lambda_powertools.metrics.provider import MetricsProviderBase +from aws_lambda_powertools.metrics.provider.cloudwatch_emf import cold_start +from 
aws_lambda_powertools.metrics.provider.cloudwatch_emf.constants import MAX_DIMENSIONS, MAX_METRICS +from aws_lambda_powertools.metrics.provider.cloudwatch_emf.exceptions import ( MetricResolutionError, MetricUnitError, - MetricValueError, - SchemaValidationError, ) -from aws_lambda_powertools.metrics.provider.base import MetricsProviderBase +from aws_lambda_powertools.metrics.provider.cloudwatch_emf.metric_properties import MetricResolution, MetricUnit from aws_lambda_powertools.metrics.types import MetricNameUnitResolution from aws_lambda_powertools.shared import constants from aws_lambda_powertools.shared.functions import resolve_env_var_choice logger = logging.getLogger(__name__) -MAX_METRICS = 100 -MAX_DIMENSIONS = 29 - -is_cold_start = True - - -class MetricResolution(Enum): - Standard = 60 - High = 1 - - -class MetricUnit(Enum): - Seconds = "Seconds" - Microseconds = "Microseconds" - Milliseconds = "Milliseconds" - Bytes = "Bytes" - Kilobytes = "Kilobytes" - Megabytes = "Megabytes" - Gigabytes = "Gigabytes" - Terabytes = "Terabytes" - Bits = "Bits" - Kilobits = "Kilobits" - Megabits = "Megabits" - Gigabits = "Gigabits" - Terabits = "Terabits" - Percent = "Percent" - Count = "Count" - BytesPerSecond = "Bytes/Second" - KilobytesPerSecond = "Kilobytes/Second" - MegabytesPerSecond = "Megabytes/Second" - GigabytesPerSecond = "Gigabytes/Second" - TerabytesPerSecond = "Terabytes/Second" - BitsPerSecond = "Bits/Second" - KilobitsPerSecond = "Kilobits/Second" - MegabitsPerSecond = "Megabits/Second" - GigabitsPerSecond = "Gigabits/Second" - TerabitsPerSecond = "Terabits/Second" - CountPerSecond = "Count/Second" - class AmazonCloudWatchEMFProvider(MetricsProviderBase): """Base class for metric functionality (namespace, metric, dimension, serialization) @@ -102,16 +64,21 @@ def __init__( namespace: str | None = None, metadata_set: Dict[str, Any] | None = None, service: str | None = None, + default_dimensions: Dict[str, Any] | None = None, ): self.metric_set = metric_set 
if metric_set is not None else {} self.dimension_set = dimension_set if dimension_set is not None else {} + self.default_dimensions = default_dimensions or {} self.namespace = resolve_env_var_choice(choice=namespace, env=os.getenv(constants.METRICS_NAMESPACE_ENV)) self.service = resolve_env_var_choice(choice=service, env=os.getenv(constants.SERVICE_NAME_ENV)) self.metadata_set = metadata_set if metadata_set is not None else {} + self._metric_units = [unit.value for unit in MetricUnit] self._metric_unit_valid_options = list(MetricUnit.__members__) self._metric_resolutions = [resolution.value for resolution in MetricResolution] + self.dimension_set.update(**self.default_dimensions) + def add_metric( self, name: str, @@ -333,6 +300,7 @@ def clear_metrics(self) -> None: self.metric_set.clear() self.dimension_set.clear() self.metadata_set.clear() + self.set_default_dimensions(**self.default_dimensions) def flush_metrics(self, raise_on_empty_metrics: bool = False) -> None: """Manually flushes the metrics. This is normally not necessary, @@ -493,154 +461,13 @@ def _add_cold_start_metric(self, context: Any) -> None: context : Any Lambda context """ - global is_cold_start - if is_cold_start: + if cold_start.is_cold_start: logger.debug("Adding cold start metric and function_name dimension") with single_metric(name="ColdStart", unit=MetricUnit.Count, value=1, namespace=self.namespace) as metric: metric.add_dimension(name="function_name", value=context.function_name) if self.service: metric.add_dimension(name="service", value=str(self.service)) - is_cold_start = False - - -class Metrics(AmazonCloudWatchEMFProvider): - """Metrics create an EMF object with up to 100 metrics - - Use Metrics when you need to create multiple metrics that have - dimensions in common (e.g. service_name="payment"). - - Metrics up to 100 metrics in memory and are shared across - all its instances. That means it can be safely instantiated outside - of a Lambda function, or anywhere else. 
- - A decorator (log_metrics) is provided so metrics are published at the end of its execution. - If more than 100 metrics are added at a given function execution, - these metrics are serialized and published before adding a given metric - to prevent metric truncation. - - Example - ------- - **Creates a few metrics and publish at the end of a function execution** - - from aws_lambda_powertools import Metrics - - metrics = Metrics(namespace="ServerlessAirline", service="payment") - - @metrics.log_metrics(capture_cold_start_metric=True) - def lambda_handler(): - metrics.add_metric(name="BookingConfirmation", unit="Count", value=1) - metrics.add_dimension(name="function_version", value="$LATEST") - - return True - - Environment variables - --------------------- - POWERTOOLS_METRICS_NAMESPACE : str - metric namespace - POWERTOOLS_SERVICE_NAME : str - service name used for default dimension - - Parameters - ---------- - service : str, optional - service name to be used as metric dimension, by default "service_undefined" - namespace : str, optional - Namespace for metrics - - Raises - ------ - MetricUnitError - When metric unit isn't supported by CloudWatch - MetricResolutionError - When metric resolution isn't supported by CloudWatch - MetricValueError - When metric value isn't a number - SchemaValidationError - When metric object fails EMF schema validation - """ - - # NOTE: We use class attrs to share metrics data across instances - # this allows customers to initialize Metrics() throughout their code base (and middlewares) - # and not get caught by accident with metrics data loss, or data deduplication - # e.g., m1 and m2 add metric ProductCreated, however m1 has 'version' dimension but m2 doesn't - # Result: ProductCreated is created twice as we now have 2 different EMF blobs - _metrics: Dict[str, Any] = {} - _dimensions: Dict[str, str] = {} - _metadata: Dict[str, Any] = {} - _default_dimensions: Dict[str, Any] = {} - - def __init__( - self, - service: str | None = 
None, - namespace: str | None = None, - provider: AmazonCloudWatchEMFProvider | None = None, - ): - self.metric_set = self._metrics - self.metadata_set = self._metadata - self.default_dimensions = self._default_dimensions - self.dimension_set = self._dimensions - - self.dimension_set.update(**self._default_dimensions) - - if provider is None: - self.provider = AmazonCloudWatchEMFProvider( - namespace=namespace, - service=service, - metric_set=self.metric_set, - dimension_set=self.dimension_set, - metadata_set=self.metadata_set, - ) - else: - self.provider = provider - - def add_metric( - self, - name: str, - unit: MetricUnit | str, - value: float, - resolution: MetricResolution | int = 60, - ) -> None: - self.provider.add_metric(name=name, unit=unit, value=value, resolution=resolution) - - def add_dimension(self, name: str, value: str) -> None: - self.provider.add_dimension(name=name, value=value) - - def serialize_metric_set( - self, - metrics: Dict | None = None, - dimensions: Dict | None = None, - metadata: Dict | None = None, - ) -> Dict: - return self.provider.serialize_metric_set(metrics=metrics, dimensions=dimensions, metadata=metadata) - - def add_metadata(self, key: str, value: Any) -> None: - self.provider.add_metadata(key=key, value=value) - - def flush_metrics(self, raise_on_empty_metrics: bool = False) -> None: - self.provider.flush_metrics(raise_on_empty_metrics=raise_on_empty_metrics) - - def log_metrics( - self, - lambda_handler: Callable[[Dict, Any], Any] | Optional[Callable[[Dict, Any, Optional[Dict]], Any]] = None, - capture_cold_start_metric: bool = False, - raise_on_empty_metrics: bool = False, - default_dimensions: Dict[str, str] | None = None, - ): - return self.provider.log_metrics( - lambda_handler=lambda_handler, - capture_cold_start_metric=capture_cold_start_metric, - raise_on_empty_metrics=raise_on_empty_metrics, - default_dimensions=default_dimensions, - ) - - def _extract_metric_resolution_value(self, resolution: Union[int, 
MetricResolution]) -> int: - return self.provider._extract_metric_resolution_value(resolution=resolution) - - def _extract_metric_unit_value(self, unit: Union[str, MetricUnit]) -> str: - return self.provider._extract_metric_unit_value(unit=unit) - - def _add_cold_start_metric(self, context: Any) -> None: - self.provider._add_cold_start_metric(context=context) + cold_start.is_cold_start = False def set_default_dimensions(self, **dimensions) -> None: """Persist dimensions across Lambda invocations @@ -667,266 +494,3 @@ def lambda_handler(): self.add_dimension(name, value) self.default_dimensions.update(**dimensions) - - def clear_default_dimensions(self) -> None: - self.default_dimensions.clear() - - def clear_metrics(self) -> None: - self.provider.clear_metrics() - # re-add default dimensions - self.set_default_dimensions(**self.default_dimensions) - - -class EphemeralMetrics(AmazonCloudWatchEMFProvider): - """Non-singleton version of Metrics to not persist metrics across instances - - NOTE: This is useful when you want to: - - - Create metrics for distinct namespaces - - Create the same metrics with different dimensions more than once - """ - - _dimensions: Dict[str, str] = {} - _default_dimensions: Dict[str, Any] = {} - - def __init__( - self, - service: str | None = None, - namespace: str | None = None, - provider: AmazonCloudWatchEMFProvider | None = None, - ): - self.default_dimensions = self._default_dimensions - self.dimension_set = self._dimensions - - self.dimension_set.update(**self._default_dimensions) - - if provider is None: - self.provider = AmazonCloudWatchEMFProvider(namespace=namespace, service=service) - else: - self.provider = provider - - def add_metric( - self, - name: str, - unit: MetricUnit | str, - value: float, - resolution: MetricResolution | int = 60, - ) -> None: - return self.provider.add_metric(name=name, unit=unit, value=value, resolution=resolution) - - def add_dimension(self, name: str, value: str) -> None: - return 
self.provider.add_dimension(name=name, value=value) - - def serialize_metric_set( - self, - metrics: Dict | None = None, - dimensions: Dict | None = None, - metadata: Dict | None = None, - ) -> Dict: - return self.provider.serialize_metric_set(metrics=metrics, dimensions=dimensions, metadata=metadata) - - def add_metadata(self, key: str, value: Any) -> None: - self.provider.add_metadata(key=key, value=value) - - def flush_metrics(self, raise_on_empty_metrics: bool = False) -> None: - self.provider.flush_metrics(raise_on_empty_metrics=raise_on_empty_metrics) - - def log_metrics( - self, - lambda_handler: Callable[[Dict, Any], Any] | Optional[Callable[[Dict, Any, Optional[Dict]], Any]] = None, - capture_cold_start_metric: bool = False, - raise_on_empty_metrics: bool = False, - default_dimensions: Dict[str, str] | None = None, - ): - return self.provider.log_metrics( - lambda_handler=lambda_handler, - capture_cold_start_metric=capture_cold_start_metric, - raise_on_empty_metrics=raise_on_empty_metrics, - default_dimensions=default_dimensions, - ) - - def _extract_metric_resolution_value(self, resolution: Union[int, MetricResolution]) -> int: - return self.provider._extract_metric_resolution_value(resolution=resolution) - - def _extract_metric_unit_value(self, unit: Union[str, MetricUnit]) -> str: - return self.provider._extract_metric_unit_value(unit=unit) - - def _add_cold_start_metric(self, context: Any) -> None: - self.provider._add_cold_start_metric(context=context) - - def set_default_dimensions(self, **dimensions) -> None: - """Persist dimensions across Lambda invocations - - Parameters - ---------- - dimensions : Dict[str, Any], optional - metric dimensions as key=value - - Example - ------- - **Sets some default dimensions that will always be present across metrics and invocations** - - from aws_lambda_powertools import Metrics - - metrics = Metrics(namespace="ServerlessAirline", service="payment") - metrics.set_default_dimensions(environment="demo", 
another="one") - - @metrics.log_metrics() - def lambda_handler(): - return True - """ - for name, value in dimensions.items(): - self.add_dimension(name, value) - - self.default_dimensions.update(**dimensions) - - def clear_default_dimensions(self) -> None: - self.default_dimensions.clear() - - def clear_metrics(self) -> None: - self.provider.clear_metrics() - # re-add default dimensions - self.set_default_dimensions(**self.default_dimensions) - - -class SingleMetric(AmazonCloudWatchEMFProvider): - """SingleMetric creates an EMF object with a single metric. - - EMF specification doesn't allow metrics with different dimensions. - SingleMetric overrides MetricManager's add_metric method to do just that. - - Use `single_metric` when you need to create metrics with different dimensions, - otherwise `aws_lambda_powertools.metrics.metrics.Metrics` is - a more cost effective option - - Environment variables - --------------------- - POWERTOOLS_METRICS_NAMESPACE : str - metric namespace - - Example - ------- - **Creates cold start metric with function_version as dimension** - - import json - from aws_lambda_powertools.metrics import single_metric, MetricUnit, MetricResolution - metric = single_metric(namespace="ServerlessAirline") - - metric.add_metric(name="ColdStart", unit=MetricUnit.Count, value=1, resolution=MetricResolution.Standard) - metric.add_dimension(name="function_version", value=47) - - print(json.dumps(metric.serialize_metric_set(), indent=4)) - - Parameters - ---------- - MetricManager : MetricManager - Inherits from `aws_lambda_powertools.metrics.base.MetricManager` - """ - - def add_metric( - self, - name: str, - unit: MetricUnit | str, - value: float, - resolution: MetricResolution | int = 60, - ) -> None: - """Method to prevent more than one metric being created - - Parameters - ---------- - name : str - Metric name (e.g. BookingConfirmation) - unit : MetricUnit - Metric unit (e.g. 
"Seconds", MetricUnit.Seconds) - value : float - Metric value - resolution : MetricResolution - Metric resolution (e.g. 60, MetricResolution.Standard) - """ - if len(self.metric_set) > 0: - logger.debug(f"Metric {name} already set, skipping...") - return - return super().add_metric(name, unit, value, resolution) - - -@contextmanager -def single_metric( - name: str, - unit: MetricUnit, - value: float, - resolution: MetricResolution | int = 60, - namespace: str | None = None, - default_dimensions: Dict[str, str] | None = None, -) -> Generator[SingleMetric, None, None]: - """Context manager to simplify creation of a single metric - - Example - ------- - **Creates cold start metric with function_version as dimension** - - from aws_lambda_powertools import single_metric - from aws_lambda_powertools.metrics import MetricUnit - from aws_lambda_powertools.metrics import MetricResolution - - with single_metric(name="ColdStart", unit=MetricUnit.Count, value=1, resolution=MetricResolution.Standard, namespace="ServerlessAirline") as metric: - metric.add_dimension(name="function_version", value="47") - - **Same as above but set namespace using environment variable** - - $ export POWERTOOLS_METRICS_NAMESPACE="ServerlessAirline" - - from aws_lambda_powertools import single_metric - from aws_lambda_powertools.metrics import MetricUnit - from aws_lambda_powertools.metrics import MetricResolution - - with single_metric(name="ColdStart", unit=MetricUnit.Count, value=1, resolution=MetricResolution.Standard) as metric: - metric.add_dimension(name="function_version", value="47") - - Parameters - ---------- - name : str - Metric name - unit : MetricUnit - `aws_lambda_powertools.helper.models.MetricUnit` - resolution : MetricResolution - `aws_lambda_powertools.helper.models.MetricResolution` - value : float - Metric value - namespace: str - Namespace for metrics - - Yields - ------- - SingleMetric - SingleMetric class instance - - Raises - ------ - MetricUnitError - When metric metric 
isn't supported by CloudWatch - MetricResolutionError - When metric resolution isn't supported by CloudWatch - MetricValueError - When metric value isn't a number - SchemaValidationError - When metric object fails EMF schema validation - """ # noqa: E501 - metric_set: Dict | None = None - try: - metric: SingleMetric = SingleMetric(namespace=namespace) - metric.add_metric(name=name, unit=unit, value=value, resolution=resolution) - - if default_dimensions: - for dim_name, dim_value in default_dimensions.items(): - metric.add_dimension(name=dim_name, value=dim_value) - - yield metric - metric_set = metric.serialize_metric_set() - finally: - print(json.dumps(metric_set, separators=(",", ":"))) - - -def reset_cold_start_flag(): - global is_cold_start - if not is_cold_start: - is_cold_start = True diff --git a/aws_lambda_powertools/metrics/provider/cloudwatch_emf/cold_start.py b/aws_lambda_powertools/metrics/provider/cloudwatch_emf/cold_start.py new file mode 100644 index 00000000000..c6ef67bd787 --- /dev/null +++ b/aws_lambda_powertools/metrics/provider/cloudwatch_emf/cold_start.py @@ -0,0 +1,9 @@ +from __future__ import annotations + +is_cold_start = True + + +def reset_cold_start_flag(): + global is_cold_start + if not is_cold_start: + is_cold_start = True diff --git a/aws_lambda_powertools/metrics/provider/cloudwatch_emf/constants.py b/aws_lambda_powertools/metrics/provider/cloudwatch_emf/constants.py new file mode 100644 index 00000000000..d8f5da0cec8 --- /dev/null +++ b/aws_lambda_powertools/metrics/provider/cloudwatch_emf/constants.py @@ -0,0 +1,2 @@ +MAX_DIMENSIONS = 29 +MAX_METRICS = 100 diff --git a/aws_lambda_powertools/metrics/provider/cloudwatch_emf/exceptions.py b/aws_lambda_powertools/metrics/provider/cloudwatch_emf/exceptions.py new file mode 100644 index 00000000000..6ac2d932ea7 --- /dev/null +++ b/aws_lambda_powertools/metrics/provider/cloudwatch_emf/exceptions.py @@ -0,0 +1,10 @@ +class MetricUnitError(Exception): + """When metric unit is not supported 
by CloudWatch""" + + pass + + +class MetricResolutionError(Exception): + """When metric resolution is not supported by CloudWatch""" + + pass diff --git a/aws_lambda_powertools/metrics/provider/cloudwatch_emf/metric_properties.py b/aws_lambda_powertools/metrics/provider/cloudwatch_emf/metric_properties.py new file mode 100644 index 00000000000..ea11bb997bb --- /dev/null +++ b/aws_lambda_powertools/metrics/provider/cloudwatch_emf/metric_properties.py @@ -0,0 +1,37 @@ +from __future__ import annotations + +from enum import Enum + + +class MetricUnit(Enum): + Seconds = "Seconds" + Microseconds = "Microseconds" + Milliseconds = "Milliseconds" + Bytes = "Bytes" + Kilobytes = "Kilobytes" + Megabytes = "Megabytes" + Gigabytes = "Gigabytes" + Terabytes = "Terabytes" + Bits = "Bits" + Kilobits = "Kilobits" + Megabits = "Megabits" + Gigabits = "Gigabits" + Terabits = "Terabits" + Percent = "Percent" + Count = "Count" + BytesPerSecond = "Bytes/Second" + KilobytesPerSecond = "Kilobytes/Second" + MegabytesPerSecond = "Megabytes/Second" + GigabytesPerSecond = "Gigabytes/Second" + TerabytesPerSecond = "Terabytes/Second" + BitsPerSecond = "Bits/Second" + KilobitsPerSecond = "Kilobits/Second" + MegabitsPerSecond = "Megabits/Second" + GigabitsPerSecond = "Gigabits/Second" + TerabitsPerSecond = "Terabits/Second" + CountPerSecond = "Count/Second" + + +class MetricResolution(Enum): + Standard = 60 + High = 1 diff --git a/ruff.toml b/ruff.toml index 424040ede1f..be67606bbe5 100644 --- a/ruff.toml +++ b/ruff.toml @@ -69,3 +69,5 @@ split-on-trailing-comma = true "tests/e2e/utils/data_fetcher/__init__.py" = ["F401"] "aws_lambda_powertools/utilities/data_classes/s3_event.py" = ["A003"] "aws_lambda_powertools/utilities/parser/models/__init__.py" = ["E402"] +# Maintenance: we're keeping EphemeralMetrics code in case of Hyrum's law so we can quickly revert it +"aws_lambda_powertools/metrics/metrics.py" = ["ERA001"] diff --git a/tests/functional/test_metrics.py 
b/tests/functional/test_metrics.py index 055de54db2b..39003243362 100644 --- a/tests/functional/test_metrics.py +++ b/tests/functional/test_metrics.py @@ -5,26 +5,25 @@ import pytest -from aws_lambda_powertools import Metrics, single_metric from aws_lambda_powertools.metrics import ( EphemeralMetrics, MetricResolution, MetricResolutionError, + Metrics, MetricUnit, MetricUnitError, MetricValueError, SchemaValidationError, + single_metric, ) from aws_lambda_powertools.metrics.provider import ( MetricsBase, MetricsProviderBase, ) -from aws_lambda_powertools.metrics.provider.amazon_cloudwatch_emf import ( - MAX_DIMENSIONS, - AmazonCloudWatchEMFProvider, - reset_cold_start_flag, -) from aws_lambda_powertools.metrics.provider.base import reset_cold_start_flag_provider +from aws_lambda_powertools.metrics.provider.cloudwatch_emf.cloudwatch import AmazonCloudWatchEMFProvider +from aws_lambda_powertools.metrics.provider.cloudwatch_emf.cold_start import reset_cold_start_flag +from aws_lambda_powertools.metrics.provider.cloudwatch_emf.constants import MAX_DIMENSIONS @pytest.fixture(scope="function", autouse=True) @@ -1169,6 +1168,7 @@ def test_ephemeral_metrics_isolated_data_set_with_default_dimension(metric, dime # GIVEN two EphemeralMetrics instances are initialized # One with default dimension and another without my_metrics = EphemeralMetrics(namespace=namespace) + my_metrics.set_default_dimensions(dev="powertools") isolated_metrics = EphemeralMetrics(namespace=namespace) @@ -1371,3 +1371,25 @@ def lambda_handler(evt, context): # and specifically about the lack of Metrics with pytest.raises(SchemaValidationError, match="Must contain at least one metric."): lambda_handler({}, {}) + + +def test_log_metrics_capture_cold_start_metric_once_with_provider_and_ephemeral(capsys, namespace, service): + # GIVEN Metrics is initialized + my_metrics = Metrics(service=service, namespace=namespace) + my_isolated_metrics = EphemeralMetrics(service=service, namespace=namespace) + + # WHEN 
log_metrics is used with capture_cold_start_metric + @my_metrics.log_metrics(capture_cold_start_metric=True) + @my_isolated_metrics.log_metrics(capture_cold_start_metric=True) + def lambda_handler(evt, context): + pass + + LambdaContext = namedtuple("LambdaContext", "function_name") + lambda_handler({}, LambdaContext("example_fn")) + + output = capture_metrics_output(capsys) + + # THEN ColdStart metric and function_name and service dimension should be logged + assert output["ColdStart"] == [1.0] + assert output["function_name"] == "example_fn" + assert output["service"] == service From 077d7e7f14ac417758b87c76e163b7156d078cee Mon Sep 17 00:00:00 2001 From: Leandro Damascena Date: Fri, 28 Jul 2023 16:52:37 +0100 Subject: [PATCH 28/32] fix mypy error --- aws_lambda_powertools/metrics/metric.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aws_lambda_powertools/metrics/metric.py b/aws_lambda_powertools/metrics/metric.py index 4acd8b5eb4a..e2ac49df489 100644 --- a/aws_lambda_powertools/metrics/metric.py +++ b/aws_lambda_powertools/metrics/metric.py @@ -1,4 +1,4 @@ # NOTE: prevents circular inheritance import -from aws_lambda_powertools.metrics.provider.amazon_cloudwatch_emf import SingleMetric, single_metric +from aws_lambda_powertools.metrics.base import SingleMetric, single_metric __all__ = ["SingleMetric", "single_metric"] From 42517ed04cdc971923bacc77a770c12ba4e1ae88 Mon Sep 17 00:00:00 2001 From: Leandro Damascena Date: Fri, 28 Jul 2023 17:08:49 +0100 Subject: [PATCH 29/32] fix mypy error --- docs/maintainers.md | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/maintainers.md b/docs/maintainers.md index 455d33f6d8a..8f3a1980141 100644 --- a/docs/maintainers.md +++ b/docs/maintainers.md @@ -288,6 +288,7 @@ Ensure the repo highlights features that should be elevated to the project roadm Add integration checks that validate pull requests and pushes to ease the burden on Pull Request reviewers. 
Continuously revisit areas of improvement to reduce operational burden in all parties involved. ### Negative Impact on the Project + Actions that negatively impact the project will be handled by the admins, in coordination with other maintainers, in balance with the urgency of the issue. Examples would be [Code of Conduct](https://github.com/aws-powertools/powertools-lambda-python/blob/develop/CODE_OF_CONDUCT.md){target="_blank"} violations, deliberate harmful or malicious actions, spam, monopolization, and security risks. From 28982a3cabadf96e95340b3a59dabae956b26767 Mon Sep 17 00:00:00 2001 From: Leandro Damascena Date: Fri, 28 Jul 2023 17:37:38 +0100 Subject: [PATCH 30/32] fix metric tests --- aws_lambda_powertools/metrics/metrics.py | 1 + 1 file changed, 1 insertion(+) diff --git a/aws_lambda_powertools/metrics/metrics.py b/aws_lambda_powertools/metrics/metrics.py index 74c0a2f30c9..66e87717d1f 100644 --- a/aws_lambda_powertools/metrics/metrics.py +++ b/aws_lambda_powertools/metrics/metrics.py @@ -148,6 +148,7 @@ def _add_cold_start_metric(self, context: Any) -> None: self.provider._add_cold_start_metric(context=context) def set_default_dimensions(self, **dimensions) -> None: + self.provider.set_default_dimensions(**dimensions) """Persist dimensions across Lambda invocations Parameters From e8cfc81e5763809ac299fd1ba2f70698bcc3f3b6 Mon Sep 17 00:00:00 2001 From: Cavalcante Damascena Date: Tue, 1 Aug 2023 13:20:11 -0300 Subject: [PATCH 31/32] fix documentation --- aws_lambda_powertools/metrics/provider/base.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/aws_lambda_powertools/metrics/provider/base.py b/aws_lambda_powertools/metrics/provider/base.py index 12cc74d028b..7617193033e 100644 --- a/aws_lambda_powertools/metrics/provider/base.py +++ b/aws_lambda_powertools/metrics/provider/base.py @@ -13,9 +13,9 @@ class MetricsProviderBase(Protocol): """ - Class for metric provider template. + Class for metric provider interface. 
- This class serves as a template for creating your own metric provider. Inherit from this class + This class serves as an interface for creating your own metric provider. Inherit from this class and implement the required methods to define your specific metric provider. Usage: From 92eda07d478f5a9c283d9d787739321cb49bdd4c Mon Sep 17 00:00:00 2001 From: Cavalcante Damascena Date: Tue, 1 Aug 2023 13:53:00 -0300 Subject: [PATCH 32/32] adding test --- tests/functional/test_metrics.py | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/tests/functional/test_metrics.py b/tests/functional/test_metrics.py index 39003243362..1eed6c82294 100644 --- a/tests/functional/test_metrics.py +++ b/tests/functional/test_metrics.py @@ -1077,6 +1077,20 @@ def test_clear_default_dimensions(namespace): assert not my_metrics.default_dimensions +def test_clear_default_dimensions_with_provider(namespace): + # GIVEN Metrics is initialized with provider and we persist a set of default dimensions + my_provider = AmazonCloudWatchEMFProvider(namespace=namespace) + my_metrics = Metrics(provider=my_provider) + my_metrics.set_default_dimensions(environment="test", log_group="/lambda/test") + + # WHEN they are removed via clear_default_dimensions method + my_metrics.clear_default_dimensions() + + # THEN there should be no default dimensions in provider and metrics + assert not my_metrics.default_dimensions + assert not my_provider.default_dimensions + + def test_default_dimensions_across_instances(namespace): # GIVEN Metrics is initialized and we persist a set of default dimensions my_metrics = Metrics(namespace=namespace)