diff --git a/.gitignore b/.gitignore
index 3aeeb1fb..ff0272ba 100644
--- a/.gitignore
+++ b/.gitignore
@@ -41,3 +41,5 @@ pip-log.txt
 node_modules
 yarn-lock.json
+
+.benchmarks
diff --git a/Dockerfile b/Dockerfile
index 396ef25c..ed43e237 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -11,21 +11,32 @@ WORKDIR /build
 COPY . .
 RUN pip install . -t ./python/lib/$runtime/site-packages
 
-# Remove *.pyc files
-RUN find ./python/lib/$runtime/site-packages -name \*.pyc -delete
-
 # Remove botocore (40MB) to reduce package size. aws-xray-sdk
 # installs it, while it's already provided by the Lambda Runtime.
 RUN rm -rf ./python/lib/$runtime/site-packages/botocore*
 RUN rm -rf ./python/lib/$runtime/site-packages/setuptools
 RUN rm -rf ./python/lib/$runtime/site-packages/jsonschema/tests
 RUN find . -name 'libddwaf.so' -delete
-RUN rm -rf ./python/lib/$runtime/site-packages/urllib3
+RUN rm -rf ./python/lib/$runtime/site-packages/urllib3*
 RUN rm ./python/lib/$runtime/site-packages/ddtrace/appsec/_iast/_taint_tracking/*.so
 RUN rm ./python/lib/$runtime/site-packages/ddtrace/appsec/_iast/_stacktrace*.so
 RUN rm ./python/lib/$runtime/site-packages/ddtrace/internal/datadog/profiling/libdd_wrapper.so
 RUN rm ./python/lib/$runtime/site-packages/ddtrace/internal/datadog/profiling/ddup/_ddup.*.so
 RUN rm ./python/lib/$runtime/site-packages/ddtrace/internal/datadog/profiling/stack_v2/_stack_v2.*.so
+RUN find . -name "*.dist-info" -type d | xargs rm -rf
+
+# Precompile all .pyc files and remove .py files. This speeds up load time.
+# Compile with optimization level 2 (-OO) and PYTHONNODEBUGRANGES=1 to reduce
+# the size of .pyc files.
+# See https://docs.python.org/3/tutorial/modules.html#compiled-python-files
+# https://docs.python.org/3.11/using/cmdline.html#cmdoption-O
+# https://docs.python.org/3/using/cmdline.html#envvar-PYTHONNODEBUGRANGES
+RUN PYTHONNODEBUGRANGES=1 python -OO -m compileall -b ./python/lib/$runtime/site-packages
+# Remove all .py files except ddtrace/contrib/*/__init__.py, which are necessary
+# for ddtrace.patch to discover instrumentation packages.
+RUN find ./python/lib/$runtime/site-packages -name \*.py | grep -v ddtrace/contrib | xargs rm -rf
+RUN find ./python/lib/$runtime/site-packages/ddtrace/contrib -name \*.py | grep -v __init__ | xargs rm -rf
+RUN find ./python/lib/$runtime/site-packages -name __pycache__ -type d -exec rm -r {} \+
 
 FROM scratch
 COPY --from=builder /build/python /
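The precompile step above works because `compileall -b` writes each `.pyc` next to its source in the legacy layout (`module.pyc` beside `module.py`) rather than under `__pycache__/`, which is what makes deleting the `.py` sources safe afterwards. A minimal sketch of the same flow in pure Python, using a hypothetical package name `mypkg`:

    import compileall
    import pathlib

    # Compile in legacy layout (module.pyc next to module.py) at optimization level 2.
    compileall.compile_dir("mypkg", legacy=True, optimize=2)

    # Delete the sources; imports now resolve to the .pyc files.
    for py in pathlib.Path("mypkg").rglob("*.py"):
        py.unlink()

    import mypkg
    print(mypkg.__file__)  # ends in __init__.pyc

The trade-off to keep in mind: `-OO` (optimization level 2) also strips asserts and docstrings from the compiled bytecode.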
diff --git a/LICENSE-3rdparty.csv b/LICENSE-3rdparty.csv
index a6692772..1cfacb70 100644
--- a/LICENSE-3rdparty.csv
+++ b/LICENSE-3rdparty.csv
@@ -1,3 +1,13 @@
 Component,Origin,License,Copyright
-flake8,gitlab.com/pycqa/flake8,MIT,"Copyright (C) 2011-2013 Tarek Ziade . Copyright (C) 2012-2016 Ian Cordasco ."
+datadog,github.com/DataDog/datadogpy,BSD-3-Clause,"Copyright (c) 2015-Present Datadog, Inc "
 wrapt,github.com/GrahamDumpleton/wrapt,BSD-2-Clause,"Copyright (c) 2013-2019, Graham Dumpleton"
+ddtrace,github.com/DataDog/dd-trace-py,BSD-3-Clause,"Copyright (c) 2016, Datadog "
+urllib3,github.com/urllib3/urllib3,MIT,Copyright (c) 2008-2020 Andrey Petrov and contributors.
+ujson,github.com/ultrajson/ultrajson,BSD-3-Clause,"Copyright (c) 2014, Electronic Arts Inc"
+importlib_metadata,github.com/python/importlib_metadata,Apache-2.0,Copyright © Jason R. Coombs
+boto3,github.com/boto/boto3,Apache-2.0,"Copyright 2013-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved."
+typing_extensions,github.com/python/typing_extensions,PSF-2.0,"Copyright (c) 1991 - 1995, Stichting Mathematisch Centrum Amsterdam, The Netherlands. All rights reserved"
+requests,github.com/psf/requests,Apache-2.0,"Copyright 2018 Kenneth Reitz"
+pytest,github.com/pytest-dev/pytest,MIT,Copyright (c) 2004 Holger Krekel and others
+pytest-benchmark,github.com/ionelmc/pytest-benchmark,BSD-2-Clause,"Copyright (c) 2014-2023, Ionel Cristian Mărieș. All rights reserved."
+flake8,gitlab.com/pycqa/flake8,MIT,"Copyright (C) 2011-2013 Tarek Ziade . Copyright (C) 2012-2016 Ian Cordasco ."
diff --git a/datadog_lambda/__init__.py b/datadog_lambda/__init__.py
index 5cc2ba00..378fd15c 100644
--- a/datadog_lambda/__init__.py
+++ b/datadog_lambda/__init__.py
@@ -1,5 +1,4 @@
 from datadog_lambda.cold_start import initialize_cold_start_tracing
-from datadog_lambda.logger import initialize_logging
 import os
 
@@ -13,11 +12,8 @@
 # The minor version corresponds to the Lambda layer version.
 # E.g., version 0.5.0 gets packaged into layer version 5.
-try:
-    import importlib.metadata as importlib_metadata
-except ModuleNotFoundError:
-    import importlib_metadata
+from datadog_lambda.version import __version__  # noqa: E402 F401
+from datadog_lambda.logger import initialize_logging  # noqa: E402
 
-__version__ = importlib_metadata.version(__name__)
 initialize_logging(__name__)
diff --git a/datadog_lambda/cold_start.py b/datadog_lambda/cold_start.py
index 9dcbec23..d4d4b8c2 100644
--- a/datadog_lambda/cold_start.py
+++ b/datadog_lambda/cold_start.py
@@ -50,12 +50,16 @@ def is_new_sandbox():
 
 def get_cold_start_tag():
     """Returns the cold start tag to be used in metrics"""
-    return "cold_start:{}".format(str(is_cold_start()).lower())
+    return "cold_start:true" if _cold_start else "cold_start:false"
 
 
 def get_proactive_init_tag():
     """Returns the proactive init tag to be used in metrics"""
-    return "proactive_initialization:{}".format(str(is_proactive_init()).lower())
+    return (
+        "proactive_initialization:true"
+        if _proactive_initialization
+        else "proactive_initialization:false"
+    )
 
 
 class ImportNode(object):
diff --git a/datadog_lambda/extension.py b/datadog_lambda/extension.py
index 159048d7..79c0031a 100644
--- a/datadog_lambda/extension.py
+++ b/datadog_lambda/extension.py
@@ -1,5 +1,5 @@
 import logging
-from os import path
+import os
 
 AGENT_URL = "http://127.0.0.1:8124"
 FLUSH_PATH = "/lambda/flush"
@@ -9,9 +9,7 @@
 
 
 def is_extension_present():
-    if path.exists(EXTENSION_PATH):
-        return True
-    return False
+    return os.path.exists(EXTENSION_PATH)
 
 
 def flush_extension():
diff --git a/datadog_lambda/handler.py b/datadog_lambda/handler.py
index 09cc5e7d..777dc492 100644
--- a/datadog_lambda/handler.py
+++ b/datadog_lambda/handler.py
@@ -22,7 +22,7 @@ class HandlerError(Exception):
     )
 parts = path.rsplit(".", 1)
 if len(parts) != 2:
-    raise HandlerError("Value %s for DD_LAMBDA_HANDLER has invalid format." % path)
+    raise HandlerError(f"Value {path} for DD_LAMBDA_HANDLER has invalid format.")
 
 (mod_name, handler_name) = parts
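The stdlib-json-to-ujson swap that starts in the next file is paired everywhere with `escape_forward_slashes=False`. That keyword is ujson-specific: by default ujson escapes `/` in strings, which stdlib `json` does not, so the flag keeps serialized payloads compatible with what downstream consumers already expect. A quick illustration (assuming ujson is installed):

    import json
    import ujson

    payload = {"url": "https://app.datadoghq.com/"}
    json.dumps(payload)   # {"url": "https://app.datadoghq.com/"}
    ujson.dumps(payload)  # {"url":"https:\/\/app.datadoghq.com\/"}  (slashes escaped)
    ujson.dumps(payload, escape_forward_slashes=False)  # {"url":"https://app.datadoghq.com/"}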
diff --git a/datadog_lambda/metric.py b/datadog_lambda/metric.py
index ca23ed96..e3b01a90 100644
--- a/datadog_lambda/metric.py
+++ b/datadog_lambda/metric.py
@@ -4,12 +4,12 @@
 # Copyright 2019 Datadog, Inc.
 
 import os
-import json
 import time
 import logging
+import ujson as json
 
 from datadog_lambda.extension import should_use_extension
-from datadog_lambda.tags import get_enhanced_metrics_tags, tag_dd_lambda_layer
+from datadog_lambda.tags import get_enhanced_metrics_tags, dd_lambda_layer_tag
 from datadog_lambda.api import init_api
 
 logger = logging.getLogger(__name__)
@@ -32,6 +32,10 @@
 flush_in_thread = os.environ.get("DD_FLUSH_IN_THREAD", "").lower() == "true"
 lambda_stats = ThreadStatsWriter(flush_in_thread)
 
+enhanced_metrics_enabled = (
+    os.environ.get("DD_ENHANCED_METRICS", "true").lower() == "true"
+)
+
 
 def lambda_metric(metric_name, value, timestamp=None, tags=None, force_async=False):
     """
@@ -50,7 +54,8 @@ def lambda_metric(metric_name, value, timestamp=None, tags=None, force_async=Fal
     and always use the layer to send metrics to the extension
     """
     flush_to_logs = os.environ.get("DD_FLUSH_TO_LOG", "").lower() == "true"
-    tags = tag_dd_lambda_layer(tags)
+    tags = [] if tags is None else list(tags)
+    tags.append(dd_lambda_layer_tag)
 
     if should_use_extension:
         logger.debug(
@@ -80,7 +85,8 @@ def write_metric_point_to_stdout(metric_name, value, timestamp=None, tags=[]):
                 "v": value,
                 "e": timestamp or int(time.time()),
                 "t": tags,
-            }
+            },
+            escape_forward_slashes=False,
         )
     )
 
@@ -89,16 +95,6 @@ def flush_stats():
     lambda_stats.flush()
 
 
-def are_enhanced_metrics_enabled():
-    """Check env var to find if enhanced metrics should be submitted
-
-    Returns:
-        boolean for whether enhanced metrics are enabled
-    """
-    # DD_ENHANCED_METRICS defaults to true
-    return os.environ.get("DD_ENHANCED_METRICS", "true").lower() == "true"
-
-
 def submit_enhanced_metric(metric_name, lambda_context):
     """Submits the enhanced metric with the given name
 
@@ -106,7 +102,7 @@
         metric_name (str): metric name w/o enhanced prefix i.e. "invocations" or "errors"
         lambda_context (dict): Lambda context dict passed to the function by AWS
     """
-    if not are_enhanced_metrics_enabled():
+    if not enhanced_metrics_enabled:
        logger.debug(
            "Not submitting enhanced metric %s because enhanced metrics are disabled",
            metric_name,
diff --git a/datadog_lambda/patch.py b/datadog_lambda/patch.py
index 0f6d28e9..5b8a92c5 100644
--- a/datadog_lambda/patch.py
+++ b/datadog_lambda/patch.py
@@ -3,11 +3,11 @@
 # This product includes software developed at Datadog (https://www.datadoghq.com/).
 # Copyright 2019 Datadog, Inc.
 
-import json
 import os
 import sys
 import logging
 import zlib
+import ujson as json
 
 from wrapt import wrap_function_wrapper as wrap
 from wrapt.importer import when_imported
@@ -144,14 +144,14 @@ def _print_request_string(request):
         data = zlib.decompress(data)
     data_dict = json.loads(data)
     data_dict.get("series", []).sort(key=lambda series: series.get("metric"))
-    sorted_data = json.dumps(data_dict)
+    sorted_data = json.dumps(data_dict, escape_forward_slashes=False)
 
     # Sort headers to prevent any differences in ordering
     headers = request.headers or {}
     sorted_headers = sorted(
         "{}:{}".format(key, value) for key, value in headers.items()
     )
-    sorted_header_str = json.dumps(sorted_headers)
+    sorted_header_str = json.dumps(sorted_headers, escape_forward_slashes=False)
     print(
         "HTTP {} {} Headers: {} Data: {}".format(
             method, url, sorted_header_str, sorted_data
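The f-string changes in the next file are cosmetic, but they show what `tag_object` actually produces: it walks lists, dicts, and `to_dict()` objects, appending one dotted segment per level. A simplified, span-free sketch of that flattening (the real function writes to a ddtrace span, redacts the keys in `redactable_keys`, and stringifies whatever remains at `max_depth`; the key `function.request` here is just an example input):

    def flatten(key, obj, out, depth=0, max_depth=10):
        # Simplified stand-in for tag_object: collect tags in a dict
        # instead of calling span.set_tag, and skip redaction.
        if depth >= max_depth:
            out[key] = str(obj)
        elif isinstance(obj, list):
            for i, v in enumerate(obj):
                flatten(f"{key}.{i}", v, out, depth + 1)
        elif hasattr(obj, "items"):
            for k, v in obj.items():
                flatten(f"{key}.{k}", v, out, depth + 1)
        else:
            out[key] = str(obj)

    out = {}
    flatten("function.request", {"user": {"ids": [7, 9]}}, out)
    # out == {"function.request.user.ids.0": "7", "function.request.user.ids.1": "9"}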
diff --git a/datadog_lambda/tag_object.py b/datadog_lambda/tag_object.py
index ec1c5a66..6d82f83b 100644
--- a/datadog_lambda/tag_object.py
+++ b/datadog_lambda/tag_object.py
@@ -4,8 +4,8 @@
 # Copyright 2021 Datadog, Inc.
 
 from decimal import Decimal
-import json
 import logging
+import ujson as json
 
 redactable_keys = ["authorization", "x-authorization", "password", "token"]
 max_depth = 10
@@ -30,17 +30,17 @@ def tag_object(span, key, obj, depth=0):
         return span.set_tag(key, str(obj))
     if isinstance(obj, list):
         for k, v in enumerate(obj):
-            formatted_key = "{}.{}".format(key, k)
+            formatted_key = f"{key}.{k}"
             tag_object(span, formatted_key, v, depth)
         return
     if hasattr(obj, "items"):
         for k, v in obj.items():
-            formatted_key = "{}.{}".format(key, k)
+            formatted_key = f"{key}.{k}"
             tag_object(span, formatted_key, v, depth)
         return
     if hasattr(obj, "to_dict"):
         for k, v in obj.to_dict().items():
-            formatted_key = "{}.{}".format(key, k)
+            formatted_key = f"{key}.{k}"
             tag_object(span, formatted_key, v, depth)
         return
     try:
diff --git a/datadog_lambda/tags.py b/datadog_lambda/tags.py
index cdaeb4ed..695d1a48 100644
--- a/datadog_lambda/tags.py
+++ b/datadog_lambda/tags.py
@@ -1,28 +1,13 @@
 import sys
 
-from platform import python_version_tuple
-
 from datadog_lambda import __version__
 from datadog_lambda.cold_start import get_cold_start_tag
 
 
-def _format_dd_lambda_layer_tag():
-    """
-    Formats the dd_lambda_layer tag, e.g., 'dd_lambda_layer:datadog-python39_1'
-    """
-    runtime = "python{}{}".format(sys.version_info[0], sys.version_info[1])
-    return "dd_lambda_layer:datadog-{}_{}".format(runtime, __version__)
-
-
-def tag_dd_lambda_layer(tags):
-    """
-    Used by lambda_metric to insert the dd_lambda_layer tag
-    """
-    dd_lambda_layer_tag = _format_dd_lambda_layer_tag()
-    if tags:
-        return tags + [dd_lambda_layer_tag]
-    else:
-        return [dd_lambda_layer_tag]
+_major, _minor = sys.version_info[0], sys.version_info[1]
+dd_lambda_layer_tag = f"dd_lambda_layer:datadog-python{_major}{_minor}_{__version__}"
+runtime_tag = f"runtime:python{_major}.{_minor}"
+library_version_tag = f"datadog_lambda:v{__version__}"
 
 
 def parse_lambda_tags_from_arn(lambda_context):
@@ -32,64 +17,50 @@
     ex: lambda_context.arn = arn:aws:lambda:us-east-1:123597598159:function:my-lambda:1
     """
     # Set up flag for extra testing to distinguish between a version or alias
-    hasAlias = False
+    has_alias = False
     # Cap the number of times to split
     split_arn = lambda_context.invoked_function_arn.split(":")
 
     if len(split_arn) > 7:
-        hasAlias = True
+        has_alias = True
         _, _, _, region, account_id, _, function_name, alias = split_arn
     else:
         _, _, _, region, account_id, _, function_name = split_arn
 
     # Add the standard tags to a list
     tags = [
-        "region:{}".format(region),
-        "account_id:{}".format(account_id),
-        "functionname:{}".format(function_name),
+        f"region:{region}",
+        f"account_id:{account_id}",
+        f"functionname:{function_name}",
     ]
 
     # Check if we have a version or alias
-    if hasAlias:
+    if has_alias:
         # If $Latest, drop the $ for datadog tag convention. A lambda alias can't start with $
         if alias.startswith("$"):
             alias = alias[1:]
         # Versions are numeric. Aliases need the executed version tag
         elif not check_if_number(alias):
-            tags.append("executedversion:{}".format(lambda_context.function_version))
+            tags.append(f"executedversion:{lambda_context.function_version}")
         # create resource tag with function name and alias/version
-        resource = "resource:{}:{}".format(function_name, alias)
+        resource = f"resource:{function_name}:{alias}"
     else:
         # Resource is only the function name otherwise
-        resource = "resource:{}".format(function_name)
+        resource = f"resource:{function_name}"
 
     tags.append(resource)
 
     return tags
 
 
-def get_runtime_tag():
-    """Get the runtime tag from the current Python version"""
-    major_version, minor_version, _ = python_version_tuple()
-
-    return "runtime:python{major}.{minor}".format(
-        major=major_version, minor=minor_version
-    )
-
-
-def get_library_version_tag():
-    """Get datadog lambda library tag"""
-    return "datadog_lambda:v{}".format(__version__)
-
-
 def get_enhanced_metrics_tags(lambda_context):
     """Get the list of tags to apply to enhanced metrics"""
-    return parse_lambda_tags_from_arn(lambda_context) + [
-        get_cold_start_tag(),
-        "memorysize:{}".format(lambda_context.memory_limit_in_mb),
-        get_runtime_tag(),
-        get_library_version_tag(),
-    ]
+    tags = parse_lambda_tags_from_arn(lambda_context)
+    tags.append(get_cold_start_tag())
+    tags.append(f"memorysize:{lambda_context.memory_limit_in_mb}")
+    tags.append(runtime_tag)
+    tags.append(library_version_tag)
+    return tags
 
 
 def check_if_number(alias):
diff --git a/datadog_lambda/tracing.py b/datadog_lambda/tracing.py
index 5e338253..1d73de3e 100644
--- a/datadog_lambda/tracing.py
+++ b/datadog_lambda/tracing.py
@@ -5,8 +5,8 @@
 import hashlib
 import logging
 import os
-import json
 import base64
+import ujson as json
 from datetime import datetime, timezone
 from typing import Optional, Dict
 
@@ -66,6 +66,8 @@
 telemetry_writer.enable()
 
+is_lambda_context = os.environ.get(XrayDaemon.FUNCTION_NAME_HEADER_NAME) != ""
+
 propagator = HTTPPropagator()
 
 DD_TRACE_JAVA_TRACE_ID_PADDING = "00000000"
@@ -93,7 +95,7 @@ def _convert_xray_sampling(xray_sampled):
 
 def _get_xray_trace_context():
-    if not is_lambda_context():
+    if not is_lambda_context:
         return None
 
     xray_trace_entity = parse_xray_header(
@@ -109,11 +111,7 @@
     logger.debug(
         "Converted trace context %s from X-Ray segment %s",
         trace_context,
-        (
-            xray_trace_entity["trace_id"],
-            xray_trace_entity["parent_id"],
-            xray_trace_entity["sampled"],
-        ),
+        xray_trace_entity,
     )
     return trace_context
 
@@ -124,7 +122,9 @@ def _get_dd_trace_py_context():
         return None
 
     logger.debug(
-        "found dd trace context: %s", (span.context.trace_id, span.context.span_id)
+        "found dd trace context: trace_id=%s span_id=%s",
+        span.context.trace_id,
+        span.context.span_id,
     )
     return span.context
 
@@ -235,37 +235,31 @@
         # logic to deal with SNS => SQS event
         if "body" in first_record:
-            body_str = first_record.get("body", {})
+            body_str = first_record.get("body")
             try:
                 body = json.loads(body_str)
                 if body.get("Type", "") == "Notification" and "TopicArn" in body:
                     logger.debug("Found SNS message inside SQS event")
                     first_record = get_first_record(create_sns_event(body))
             except Exception:
-                first_record = event.get("Records")[0]
                 pass
 
-        msg_attributes = first_record.get(
-            "messageAttributes",
-            first_record.get("Sns", {}).get("MessageAttributes", {}),
-        )
-        dd_payload = msg_attributes.get("_datadog", {})
+        msg_attributes = first_record.get("messageAttributes")
+        if msg_attributes is None:
+            sns_record = first_record.get("Sns") or {}
+            msg_attributes = sns_record.get("MessageAttributes") or {}
+        dd_payload = msg_attributes.get("_datadog")
         if dd_payload:
             # SQS uses dataType and binaryValue/stringValue
             # SNS uses Type and Value
             dd_json_data = None
-            dd_json_data_type = dd_payload.get("Type", dd_payload.get("dataType", ""))
+            dd_json_data_type = dd_payload.get("Type") or dd_payload.get("dataType")
            if dd_json_data_type == "Binary":
-                dd_json_data = dd_payload.get(
-                    "binaryValue",
-                    dd_payload.get("Value", r"{}"),
-                )
-                dd_json_data = base64.b64decode(dd_json_data)
+                dd_json_data = dd_payload.get("binaryValue") or dd_payload.get("Value")
+                if dd_json_data:
+                    dd_json_data = base64.b64decode(dd_json_data)
            elif dd_json_data_type == "String":
-                dd_json_data = dd_payload.get(
-                    "stringValue",
-                    dd_payload.get("Value", r"{}"),
-                )
+                dd_json_data = dd_payload.get("stringValue") or dd_payload.get("Value")
            else:
                logger.debug(
                    "Datadog Lambda Python only supports extracting trace"
@@ -278,23 +272,25 @@ def extract_context_from_sqs_or_sns_event_or_context(event, lambda_context):
         else:
             # Handle case where trace context is injected into attributes.AWSTraceHeader
             # example: Root=1-654321ab-000000001234567890abcdef;Parent=0123456789abcdef;Sampled=1
-            x_ray_header = first_record.get("attributes", {}).get("AWSTraceHeader")
-            if x_ray_header:
-                x_ray_context = parse_xray_header(x_ray_header)
-                trace_id_parts = x_ray_context.get("trace_id", "").split("-")
-                if len(trace_id_parts) > 2 and trace_id_parts[2].startswith(
-                    DD_TRACE_JAVA_TRACE_ID_PADDING
-                ):
-                    # If it starts with eight 0's padding,
-                    # then this AWSTraceHeader contains Datadog injected trace context
-                    logger.debug(
-                        "Found dd-trace injected trace context from AWSTraceHeader"
-                    )
-                    return Context(
-                        trace_id=int(trace_id_parts[2][8:], 16),
-                        span_id=int(int(x_ray_context["parent_id"], 16)),
-                        sampling_priority=float(x_ray_context["sampled"]),
-                    )
+            attrs = first_record.get("attributes")
+            if attrs:
+                x_ray_header = attrs.get("AWSTraceHeader")
+                if x_ray_header:
+                    x_ray_context = parse_xray_header(x_ray_header)
+                    trace_id_parts = x_ray_context.get("trace_id", "").split("-")
+                    if len(trace_id_parts) > 2 and trace_id_parts[2].startswith(
+                        DD_TRACE_JAVA_TRACE_ID_PADDING
+                    ):
+                        # If it starts with eight 0's padding,
+                        # then this AWSTraceHeader contains Datadog injected trace context
+                        logger.debug(
+                            "Found dd-trace injected trace context from AWSTraceHeader"
+                        )
+                        return Context(
+                            trace_id=int(trace_id_parts[2][8:], 16),
+                            span_id=int(x_ray_context["parent_id"], 16),
+                            sampling_priority=float(x_ray_context["sampled"]),
+                        )
         return extract_context_from_lambda_context(lambda_context)
     except Exception as e:
         logger.debug("The trace extractor returned with error %s", e)
@@ -339,21 +335,22 @@ def extract_context_from_kinesis_event(event, lambda_context):
     """
     try:
         record = get_first_record(event)
-        data = record.get("kinesis", {}).get("data", None)
+        kinesis = record.get("kinesis")
+        if not kinesis:
+            return extract_context_from_lambda_context(lambda_context)
+        data = kinesis.get("data")
         if data:
             b64_bytes = data.encode("ascii")
             str_bytes = base64.b64decode(b64_bytes)
             data_str = str_bytes.decode("ascii")
             data_obj = json.loads(data_str)
             dd_ctx = data_obj.get("_datadog")
-
-        if not dd_ctx:
-            return extract_context_from_lambda_context(lambda_context)
-
-        return propagator.extract(dd_ctx)
+            if dd_ctx:
+                return propagator.extract(dd_ctx)
     except Exception as e:
         logger.debug("The trace extractor returned with error %s", e)
-        return extract_context_from_lambda_context(lambda_context)
+
+    return extract_context_from_lambda_context(lambda_context)
 
 
 def _deterministic_md5_hash(s: str) -> int:
@@ -380,7 +377,7 @@ def extract_context_from_step_functions(event, lambda_context):
         state_entered_time = event.get("State").get("EnteredTime")
         trace_id = _deterministic_md5_hash(execution_id)
         parent_id = _deterministic_md5_hash(
-            execution_id + "#" + state_name + "#" + state_entered_time
+            f"{execution_id}#{state_name}#{state_entered_time}"
         )
         sampling_priority = SamplingPriority.AUTO_KEEP
         return Context(
@@ -396,11 +393,7 @@ def extract_context_custom_extractor(extractor, event, lambda_context):
     Extract Datadog trace context using a custom trace extractor function
     """
     try:
-        (
-            trace_id,
-            parent_id,
-            sampling_priority,
-        ) = extractor(event, lambda_context)
+        trace_id, parent_id, sampling_priority = extractor(event, lambda_context)
         return Context(
             trace_id=int(trace_id),
             span_id=int(parent_id),
@@ -426,15 +419,20 @@ def is_authorizer_response(response) -> bool:
 
 def get_injected_authorizer_data(event, is_http_api) -> dict:
     try:
-        authorizer_headers = event.get("requestContext", {}).get("authorizer")
+        req_ctx = event.get("requestContext")
+        if not req_ctx:
+            return None
+        authorizer_headers = req_ctx.get("authorizer")
         if not authorizer_headers:
             return None
 
-        dd_data_raw = (
-            authorizer_headers.get("lambda", {}).get("_datadog")
-            if is_http_api
-            else authorizer_headers.get("_datadog")
-        )
+        if is_http_api:
+            lambda_hdr = authorizer_headers.get("lambda")
+            if not lambda_hdr:
+                return None
+            dd_data_raw = lambda_hdr.get("_datadog")
+        else:
+            dd_data_raw = authorizer_headers.get("_datadog")
 
         if not dd_data_raw:
             return None
@@ -448,16 +446,19 @@
         # that case, we use the injected Authorizing_Request_Id to tell if it's cached. But token
         # authorizers don't pass on the requestId. The Authorizing_Request_Id can't work for all
         # cases neither. As a result, we combine both methods as shown below.
-        if authorizer_headers.get("integrationLatency", 0) > 0 or event.get(
-            "requestContext", {}
-        ).get("requestId") == injected_data.get(Headers.Authorizing_Request_Id):
+        if authorizer_headers.get("integrationLatency", 0) > 0:
             return injected_data
-        else:
+        req_ctx = event.get("requestContext")
+        if not req_ctx:
             return None
+        if req_ctx.get("requestId") == injected_data.get(
+            Headers.Authorizing_Request_Id
+        ):
+            return injected_data
+        return None
 
     except Exception as e:
         logger.debug("Failed to check if invocated by an authorizer. error %s", e)
-        return None
 
 
 def extract_dd_trace_context(
@@ -529,8 +530,8 @@ def get_dd_trace_context_obj():
         xray_context = _get_xray_trace_context()  # xray (sub)segment
     except Exception as e:
         logger.debug(
-            "get_dd_trace_context couldn't read from segment from x-ray, with error %s"
-            % e
+            "get_dd_trace_context couldn't read from segment from x-ray, with error %s",
+            e,
         )
     if not xray_context:
         return None
@@ -569,7 +570,7 @@ def set_correlation_ids():
 
     TODO: Remove me when Datadog tracer is natively supported in Lambda.
     """
-    if not is_lambda_context():
+    if not is_lambda_context:
         logger.debug("set_correlation_ids is only supported in LambdaContext")
         return
     if dd_tracing_enabled:
@@ -613,14 +614,6 @@ def inject_correlation_ids():
     logger.debug("logs injection configured")
 
 
-def is_lambda_context():
-    """
-    Return True if the X-Ray context is `LambdaContext`, rather than the
-    regular `Context` (e.g., when testing lambda functions locally).
-    """
-    return os.environ.get(XrayDaemon.FUNCTION_NAME_HEADER_NAME, "") != ""
-
-
 def set_dd_trace_py_root(trace_context_source, merge_xray_traces):
     if trace_context_source == TraceContextSource.EVENT or merge_xray_traces:
         context = Context(
@@ -635,8 +628,9 @@ def set_dd_trace_py_root(trace_context_source, merge_xray_traces):
         tracer.context_provider.activate(context)
         logger.debug(
-            "Set dd trace root context to: %s",
-            (context.trace_id, context.span_id),
+            "Set dd trace root context to: trace_id=%s span_id=%s",
+            context.trace_id,
+            context.span_id,
         )
 
 
@@ -697,9 +691,7 @@ def create_inferred_span(
             event_source.to_string(),
             e,
         )
-        return None
     logger.debug("Unable to infer a span: unknown event type")
-    return None
 
 
 def create_service_mapping(val):
@@ -721,20 +713,22 @@ def determine_service_name(service_mapping, specific_key, generic_key, default_v
     return service_name
 
 
-service_mapping = {}
 # Initialization code
 service_mapping_str = os.getenv("DD_SERVICE_MAPPING", "")
 service_mapping = create_service_mapping(service_mapping_str)
 
+_dd_origin = {"_dd.origin": "lambda"}
+
 
 def create_inferred_span_from_lambda_function_url_event(event, context):
     request_context = event.get("requestContext")
     api_id = request_context.get("apiId")
     domain = request_context.get("domainName")
     service_name = determine_service_name(service_mapping, api_id, "lambda_url", domain)
-    method = request_context.get("http", {}).get("method")
-    path = request_context.get("http", {}).get("path")
-    resource = "{0} {1}".format(method, path)
+    http = request_context.get("http")
+    method = http.get("method") if http else None
+    path = http.get("path") if http else None
+    resource = f"{method} {path}"
     tags = {
         "operation_name": "aws.lambda.url",
         "http.url": domain + path,
@@ -744,25 +738,23 @@
         "request_id": context.aws_request_id,
     }
     request_time_epoch = request_context.get("timeEpoch")
-    args = {
-        "service": service_name,
-        "resource": resource,
-        "span_type": "http",
-    }
-    tracer.set_tags(
-        {"_dd.origin": "lambda"}
-    )  # function urls don't count as lambda_inferred,
+    tracer.set_tags(_dd_origin)  # function urls don't count as lambda_inferred,
     # because they're in the same service as the inferring lambda function
-    span = tracer.trace("aws.lambda.url", **args)
+    span = tracer.trace(
+        "aws.lambda.url", service=service_name, resource=resource, span_type="http"
+    )
     InferredSpanInfo.set_tags(tags, tag_source="self", synchronicity="sync")
     if span:
         span.set_tags(tags)
-        span.start = request_time_epoch / 1000
+        span.start_ns = int(request_time_epoch) * 1e6
     return span
 
 
 def is_api_gateway_invocation_async(event):
-    return event.get("headers", {}).get("X-Amz-Invocation-Type") == "Event"
+    hdrs = event.get("headers")
+    if not hdrs:
+        return False
+    return hdrs.get("X-Amz-Invocation-Type") == "Event"
 
 
 def insert_upstream_authorizer_span(
@@ -862,7 +854,7 @@ def create_inferred_span_from_api_gateway_websocket_event(
         "resource": endpoint,
         "span_type": "web",
     }
-    tracer.set_tags({"_dd.origin": "lambda"})
+    tracer.set_tags(_dd_origin)
     upstream_authorizer_span = None
     finish_time_ns = None
     if decode_authorizer_context:
@@ -893,7 +885,8 @@ def create_inferred_span_from_api_gateway_event(
     )
     method = event.get("httpMethod")
     path = event.get("path")
-    resource = "{0} {1}".format(method, path)
+    resource_path = _get_resource_path(event, request_context)
+    resource = f"{method} {resource_path}"
     tags = {
         "operation_name": "aws.apigateway.rest",
         "http.url": domain + path,
@@ -915,7 +908,7 @@ def create_inferred_span_from_api_gateway_event(
         "resource": resource,
         "span_type": "http",
     }
-    tracer.set_tags({"_dd.origin": "lambda"})
+    tracer.set_tags(_dd_origin)
     upstream_authorizer_span = None
     finish_time_ns = None
     if decode_authorizer_context:
@@ -936,6 +929,16 @@ def create_inferred_span_from_api_gateway_event(
     return span
 
 
+def _get_resource_path(event, request_context):
+    route_key = request_context.get("routeKey") or ""
+    if "{" in route_key:
+        try:
+            return route_key.split(" ")[1]
+        except Exception as e:
+            logger.debug("Error parsing routeKey: %s", e)
+    return event.get("rawPath") or request_context.get("resourcePath") or route_key
+
+
 def create_inferred_span_from_http_api_event(
     event, context, decode_authorizer_context: bool = True
 ):
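A quick illustration of what the new `_get_resource_path` helper returns, using values taken from the parametrized sample events added later in this diff; the path template wins over the raw path whenever the routeKey carries parameters, which keeps resource names low-cardinality:

    # HTTP API (v2) event: routeKey holds the template, rawPath the concrete path.
    _get_resource_path({"rawPath": "/user/42"}, {"routeKey": "GET /user/{id}"})
    # -> "/user/{id}"

    # REST API (v1) event: no usable routeKey, so resourcePath is used instead.
    _get_resource_path({}, {"resourcePath": "/user/{id}"})
    # -> "/user/{id}"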
@@ -945,17 +948,19 @@
     service_name = determine_service_name(
         service_mapping, api_id, "lambda_api_gateway", domain
     )
-    method = request_context.get("http", {}).get("method")
+    http = request_context.get("http") or {}
+    method = http.get("method")
     path = event.get("rawPath")
-    resource = "{0} {1}".format(method, path)
+    resource_path = _get_resource_path(event, request_context)
+    resource = f"{method} {resource_path}"
     tags = {
         "operation_name": "aws.httpapi",
         "endpoint": path,
         "http.url": domain + path,
-        "http.method": request_context.get("http", {}).get("method"),
-        "http.protocol": request_context.get("http", {}).get("protocol"),
-        "http.source_ip": request_context.get("http", {}).get("sourceIp"),
-        "http.user_agent": request_context.get("http", {}).get("userAgent"),
+        "http.method": http.get("method"),
+        "http.protocol": http.get("protocol"),
+        "http.source_ip": http.get("sourceIp"),
+        "http.user_agent": http.get("userAgent"),
         "resource_names": resource,
         "request_id": context.aws_request_id,
         "apiid": api_id,
@@ -967,12 +972,7 @@
         InferredSpanInfo.set_tags(tags, tag_source="self", synchronicity="async")
     else:
         InferredSpanInfo.set_tags(tags, tag_source="self", synchronicity="sync")
-    args = {
-        "service": service_name,
-        "resource": resource,
-        "span_type": "http",
-    }
-    tracer.set_tags({"_dd.origin": "lambda"})
+    tracer.set_tags(_dd_origin)
     inferred_span_start_ns = request_time_epoch_ms * 1e6
     if decode_authorizer_context:
         injected_authorizer_data = get_injected_authorizer_data(event, True)
@@ -980,7 +980,9 @@
             inferred_span_start_ns = injected_authorizer_data.get(
                 Headers.Parent_Span_Finish_Time
             )
-    span = tracer.trace("aws.httpapi", **args)
+    span = tracer.trace(
+        "aws.httpapi", service=service_name, resource=resource, span_type="http"
+    )
     if span:
         span.set_tags(tags)
         span.start_ns = int(inferred_span_start_ns)
@@ -996,21 +998,17 @@ def create_inferred_span_from_sqs_event(event, context):
     service_name = determine_service_name(
         service_mapping, queue_name, "lambda_sqs", "sqs"
     )
+    attrs = event_record.get("attributes") or {}
     tags = {
         "operation_name": "aws.sqs",
         "resource_names": queue_name,
         "queuename": queue_name,
         "event_source_arn": event_source_arn,
         "receipt_handle": event_record.get("receiptHandle"),
-        "sender_id": event_record.get("attributes", {}).get("SenderId"),
+        "sender_id": attrs.get("SenderId"),
     }
     InferredSpanInfo.set_tags(tags, tag_source="self", synchronicity="async")
-    request_time_epoch = event_record.get("attributes", {}).get("SentTimestamp")
-    args = {
-        "service": service_name,
-        "resource": queue_name,
-        "span_type": "web",
-    }
+    request_time_epoch = attrs.get("SentTimestamp")
 
     start_time = int(request_time_epoch) / 1000
 
     upstream_span = None
@@ -1039,15 +1037,17 @@
         except Exception as e:
             logger.debug(
-                "Unable to create upstream span from SQS message, with error %s" % e
+                "Unable to create upstream span from SQS message, with error %s", e
             )
             pass
 
     # trace context needs to be set again as it is reset
     # when sns_span.finish executes
     tracer.context_provider.activate(trace_ctx)
-    tracer.set_tags({"_dd.origin": "lambda"})
-    span = tracer.trace("aws.sqs", **args)
+    tracer.set_tags(_dd_origin)
+    span = tracer.trace(
+        "aws.sqs", service=service_name, resource=queue_name, span_type="web"
+    )
     if span:
         span.set_tags(tags)
         span.start = start_time
@@ -1059,8 +1059,8 @@
 
 def create_inferred_span_from_sns_event(event, context):
     event_record = get_first_record(event)
-    sns_message = event_record.get("Sns")
-    topic_arn = event_record.get("Sns", {}).get("TopicArn")
+    sns_message = event_record.get("Sns") or {}
+    topic_arn = sns_message.get("TopicArn")
     topic_name = topic_arn.split(":")[-1]
     service_name = determine_service_name(
         service_mapping, topic_name, "lambda_sns", "sns"
@@ -1075,21 +1075,19 @@
     }
 
     # Subject not available in SNS => SQS scenario
-    if "Subject" in sns_message and sns_message["Subject"]:
-        tags["subject"] = sns_message.get("Subject")
+    subject = sns_message.get("Subject")
+    if subject:
+        tags["subject"] = subject
 
     InferredSpanInfo.set_tags(tags, tag_source="self", synchronicity="async")
     sns_dt_format = "%Y-%m-%dT%H:%M:%S.%fZ"
-    timestamp = event_record.get("Sns", {}).get("Timestamp")
+    timestamp = sns_message.get("Timestamp")
     dt = datetime.strptime(timestamp, sns_dt_format)
 
-    args = {
-        "service": service_name,
-        "resource": topic_name,
-        "span_type": "web",
-    }
-    tracer.set_tags({"_dd.origin": "lambda"})
-    span = tracer.trace("aws.sns", **args)
+    tracer.set_tags(_dd_origin)
+    span = tracer.trace(
+        "aws.sns", service=service_name, resource=topic_name, span_type="web"
+    )
     if span:
         span.set_tags(tags)
         span.start = dt.replace(tzinfo=timezone.utc).timestamp()
@@ -1105,6 +1103,7 @@ def create_inferred_span_from_kinesis_event(event, context):
     service_name = determine_service_name(
         service_mapping, stream_name, "lambda_kinesis", "kinesis"
     )
+    kinesis = event_record.get("kinesis") or {}
     tags = {
         "operation_name": "aws.kinesis",
         "resource_names": stream_name,
@@ -1114,20 +1113,15 @@
         "event_id": event_id,
         "event_name": event_record.get("eventName"),
         "event_version": event_record.get("eventVersion"),
-        "partition_key": event_record.get("kinesis", {}).get("partitionKey"),
+        "partition_key": kinesis.get("partitionKey"),
     }
     InferredSpanInfo.set_tags(tags, tag_source="self", synchronicity="async")
-    request_time_epoch = event_record.get("kinesis", {}).get(
-        "approximateArrivalTimestamp"
-    )
+    request_time_epoch = kinesis.get("approximateArrivalTimestamp")
 
-    args = {
-        "service": service_name,
-        "resource": stream_name,
-        "span_type": "web",
-    }
-    tracer.set_tags({"_dd.origin": "lambda"})
-    span = tracer.trace("aws.kinesis", **args)
+    tracer.set_tags(_dd_origin)
+    span = tracer.trace(
+        "aws.kinesis", service=service_name, resource=stream_name, span_type="web"
+    )
     if span:
         span.set_tags(tags)
         span.start = request_time_epoch
@@ -1141,7 +1135,7 @@ def create_inferred_span_from_dynamodb_event(event, context):
     service_name = determine_service_name(
         service_mapping, table_name, "lambda_dynamodb", "dynamodb"
     )
-    dynamodb_message = event_record.get("dynamodb")
+    dynamodb_message = event_record.get("dynamodb") or {}
     tags = {
         "operation_name": "aws.dynamodb",
         "resource_names": table_name,
@@ -1154,16 +1148,11 @@
         "size_bytes": str(dynamodb_message.get("SizeBytes")),
     }
     InferredSpanInfo.set_tags(tags, synchronicity="async", tag_source="self")
-    request_time_epoch = event_record.get("dynamodb", {}).get(
-        "ApproximateCreationDateTime"
+    request_time_epoch = dynamodb_message.get("ApproximateCreationDateTime")
+    tracer.set_tags(_dd_origin)
+    span = tracer.trace(
+        "aws.dynamodb", service=service_name, resource=table_name, span_type="web"
     )
-    args = {
-        "service": service_name,
-        "resource": table_name,
-        "span_type": "web",
-    }
-    tracer.set_tags({"_dd.origin": "lambda"})
-    span = tracer.trace("aws.dynamodb", **args)
     if span:
         span.set_tags(tags)
@@ -1173,7 +1162,10 @@ def create_inferred_span_from_s3_event(event, context):
     event_record = get_first_record(event)
-    bucket_name = event_record.get("s3", {}).get("bucket", {}).get("name")
+    s3 = event_record.get("s3") or {}
+    bucket = s3.get("bucket") or {}
+    obj = s3.get("object") or {}
+    bucket_name = bucket.get("name")
     service_name = determine_service_name(
         service_mapping, bucket_name, "lambda_s3", "s3"
     )
@@ -1182,23 +1174,20 @@
         "resource_names": bucket_name,
         "event_name": event_record.get("eventName"),
         "bucketname": bucket_name,
-        "bucket_arn": event_record.get("s3", {}).get("bucket", {}).get("arn"),
-        "object_key": event_record.get("s3", {}).get("object", {}).get("key"),
-        "object_size": str(event_record.get("s3", {}).get("object", {}).get("size")),
-        "object_etag": event_record.get("s3", {}).get("object", {}).get("eTag"),
+        "bucket_arn": bucket.get("arn"),
+        "object_key": obj.get("key"),
+        "object_size": str(obj.get("size")),
+        "object_etag": obj.get("eTag"),
     }
     InferredSpanInfo.set_tags(tags, synchronicity="async", tag_source="self")
     dt_format = "%Y-%m-%dT%H:%M:%S.%fZ"
     timestamp = event_record.get("eventTime")
     dt = datetime.strptime(timestamp, dt_format)
 
-    args = {
-        "service": service_name,
-        "resource": bucket_name,
-        "span_type": "web",
-    }
-    tracer.set_tags({"_dd.origin": "lambda"})
-    span = tracer.trace("aws.s3", **args)
+    tracer.set_tags(_dd_origin)
+    span = tracer.trace(
+        "aws.s3", service=service_name, resource=bucket_name, span_type="web"
+    )
     if span:
         span.set_tags(tags)
         span.start = dt.replace(tzinfo=timezone.utc).timestamp()
@@ -1224,13 +1213,10 @@ def create_inferred_span_from_eventbridge_event(event, context):
     timestamp = event.get("time")
     dt = datetime.strptime(timestamp, dt_format)
 
-    args = {
-        "service": service_name,
-        "resource": source,
-        "span_type": "web",
-    }
-    tracer.set_tags({"_dd.origin": "lambda"})
-    span = tracer.trace("aws.eventbridge", **args)
+    tracer.set_tags(_dd_origin)
+    span = tracer.trace(
+        "aws.eventbridge", service=service_name, resource=source, span_type="web"
+    )
     if span:
         span.set_tags(tags)
         span.start = dt.replace(tzinfo=timezone.utc).timestamp()
@@ -1247,7 +1233,7 @@ def create_function_execution_span(
     trigger_tags,
     parent_span=None,
 ):
-    tags = {}
+    tags = None
     if context:
         function_arn = (context.invoked_function_arn or "").lower()
         tk = function_arn.split(":")
@@ -1266,18 +1252,19 @@ def create_function_execution_span(
             "dd_trace": ddtrace_version,
             "span.name": "aws.lambda",
         }
+    tags = tags or {}
     if is_proactive_init:
         tags["proactive_initialization"] = str(is_proactive_init).lower()
     if trace_context_source == TraceContextSource.XRAY and merge_xray_traces:
         tags["_dd.parent_source"] = trace_context_source
     tags.update(trigger_tags)
-    args = {
-        "service": "aws.lambda",
-        "resource": function_name,
-        "span_type": "serverless",
-    }
-    tracer.set_tags({"_dd.origin": "lambda"})
-    span = tracer.trace("aws.lambda", **args)
+    tracer.set_tags(_dd_origin)
+    span = tracer.trace(
+        "aws.lambda",
+        service="aws.lambda",
+        resource=function_name,
+        span_type="serverless",
+    )
     if span:
         span.set_tags(tags)
         if parent_span:
diff --git a/datadog_lambda/trigger.py b/datadog_lambda/trigger.py
index bbb44b30..68531ebf 100644
--- a/datadog_lambda/trigger.py
+++ b/datadog_lambda/trigger.py
@@ -5,7 +5,7 @@
 
 import base64
 import gzip
-import json
+import ujson as json
 from io import BytesIO, BufferedReader
 from enum import Enum
 from typing import Any
@@ -110,10 +110,10 @@ def get_first_record(event):
 
 def parse_event_source(event: dict) -> _EventSource:
     """Determines the source of the trigger event"""
-    if type(event) is not dict:
+    if not isinstance(event, dict):
         return _EventSource(EventTypes.UNKNOWN)
 
-    event_source = _EventSource(EventTypes.UNKNOWN)
+    event_source = None
 
     request_context = event.get("requestContext")
     if request_context and request_context.get("stage"):
@@ -126,7 +126,7 @@ def parse_event_source(event: dict) -> _EventSource:
             event_source.subtype = EventSubtypes.API_GATEWAY
         if "routeKey" in event:
             event_source.subtype = EventSubtypes.HTTP_API
-        if event.get("requestContext", {}).get("messageDirection"):
+        if request_context.get("messageDirection"):
             event_source.subtype = EventSubtypes.WEBSOCKET
 
     if request_context and request_context.get("elb"):
@@ -151,10 +151,9 @@ def parse_event_source(event: dict) -> _EventSource:
 
     event_record = get_first_record(event)
     if event_record:
-        aws_event_source = event_record.get(
-            "eventSource", event_record.get("EventSource")
+        aws_event_source = event_record.get("eventSource") or event_record.get(
+            "EventSource"
         )
-
         if aws_event_source == "aws:dynamodb":
             event_source = _EventSource(EventTypes.DYNAMODB)
         if aws_event_source == "aws:kinesis":
@@ -165,11 +164,10 @@ def parse_event_source(event: dict) -> _EventSource:
             event_source = _EventSource(EventTypes.SNS)
         if aws_event_source == "aws:sqs":
             event_source = _EventSource(EventTypes.SQS)
-
         if event_record.get("cf"):
             event_source = _EventSource(EventTypes.CLOUDFRONT)
 
-    return event_source
+    return event_source or _EventSource(EventTypes.UNKNOWN)
 
 
 def detect_lambda_function_url_domain(domain: str) -> bool:
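The net effect of the rewrite above is that `event_source` stays `None` until some branch matches, and the final return restores the `UNKNOWN` default in exactly one place. A hypothetical usage sketch, using the module's own `parse_event_source` and `EventTypes` names:

    event = {"Records": [{"eventSource": "aws:sqs", "body": "{}"}]}
    assert parse_event_source(event).event_type == EventTypes.SQS

    # No matching branch: the final `or` supplies the UNKNOWN default.
    assert parse_event_source({"weird": 1}).event_type == EventTypes.UNKNOWN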
@@ -193,20 +191,26 @@ def parse_event_source_arn(source: _EventSource, event: dict, context: Any) -> s
     event_record = get_first_record(event)
     # e.g. arn:aws:s3:::lambda-xyz123-abc890
     if source.to_string() == "s3":
-        return event_record.get("s3", {}).get("bucket", {}).get("arn")
+        s3 = event_record.get("s3")
+        if s3:
+            bucket = s3.get("bucket")
+            if bucket:
+                return bucket.get("arn")
+        return None
 
     # e.g. arn:aws:sns:us-east-1:123456789012:sns-lambda
     if source.to_string() == "sns":
-        return event_record.get("Sns", {}).get("TopicArn")
+        sns = event_record.get("Sns")
+        if sns:
+            return sns.get("TopicArn")
+        return None
 
     # e.g. arn:aws:cloudfront::123456789012:distribution/ABC123XYZ
     if source.event_type == EventTypes.CLOUDFRONT:
         distribution_id = (
             event_record.get("cf", {}).get("config", {}).get("distributionId")
         )
-        return "arn:{}:cloudfront::{}:distribution/{}".format(
-            aws_arn, account_id, distribution_id
-        )
+        return f"arn:{aws_arn}:cloudfront::{account_id}:distribution/{distribution_id}"
 
     # e.g. arn:aws:lambda:::url::
     if source.equals(EventTypes.LAMBDA_FUNCTION_URL):
@@ -223,14 +227,18 @@ def parse_event_source_arn(source: _EventSource, event: dict, context: Any) -> s
     # e.g. arn:aws:apigateway:us-east-1::/restapis/xyz123/stages/default
     if source.event_type == EventTypes.API_GATEWAY:
         request_context = event.get("requestContext")
-        return "arn:{}:apigateway:{}::/restapis/{}/stages/{}".format(
-            aws_arn, region, request_context.get("apiId"), request_context.get("stage")
-        )
+        api_id = request_context.get("apiId")
+        stage = request_context.get("stage")
+        return f"arn:{aws_arn}:apigateway:{region}::/restapis/{api_id}/stages/{stage}"
 
     # e.g. arn:aws:elasticloadbalancing:us-east-1:123456789012:targetgroup/lambda-xyz/123
     if source.event_type == EventTypes.ALB:
         request_context = event.get("requestContext")
-        return request_context.get("elb", {}).get("targetGroupArn")
+        if request_context:
+            elb = request_context.get("elb")
+            if elb:
+                return elb.get("targetGroupArn")
+        return None
 
     # e.g. arn:aws:logs:us-west-1:123456789012:log-group:/my-log-group-xyz
     if source.event_type == EventTypes.CLOUDWATCH_LOGS:
@@ -240,9 +248,7 @@
         data = b"".join(BufferedReader(decompress_stream))
         logs = json.loads(data)
         log_group = logs.get("logGroup", "cloudwatch")
-        return "arn:{}:logs:{}:{}:log-group:{}".format(
-            aws_arn, region, account_id, log_group
-        )
+        return f"arn:{aws_arn}:logs:{region}:{account_id}:log-group:{log_group}"
 
     # e.g. arn:aws:events:us-east-1:123456789012:rule/my-schedule
     if source.event_type == EventTypes.CLOUDWATCH_EVENTS and event.get("resources"):
@@ -296,6 +302,13 @@ def extract_http_tags(event):
     return http_tags
 
 
+_http_event_types = (
+    EventTypes.API_GATEWAY,
+    EventTypes.ALB,
+    EventTypes.LAMBDA_FUNCTION_URL,
+)
+
+
 def extract_trigger_tags(event: dict, context: Any) -> dict:
     """
     Parses the trigger event object to get tags to be added to the span metadata
@@ -309,16 +322,15 @@ def extract_trigger_tags(event: dict, context: Any) -> dict:
     if event_source_arn:
         trigger_tags["function_trigger.event_source_arn"] = event_source_arn
 
-    if event_source.event_type in [
-        EventTypes.API_GATEWAY,
-        EventTypes.ALB,
-        EventTypes.LAMBDA_FUNCTION_URL,
-    ]:
+    if event_source.event_type in _http_event_types:
         trigger_tags.update(extract_http_tags(event))
 
     return trigger_tags
 
 
+_str_http_triggers = [et.value for et in _http_event_types]
+
+
 def extract_http_status_code_tag(trigger_tags, response):
     """
     If the Lambda was triggered by API Gateway, Lambda Function URL, or ALB,
@@ -329,15 +341,7 @@ def extract_http_status_code_tag(trigger_tags, response):
     str_event_source = trigger_tags.get("function_trigger.event_source")
     # it would be cleaner if each event type was a constant object that
     # knew some properties about itself like this.
-    str_http_triggers = [
-        et.value
-        for et in [
-            EventTypes.API_GATEWAY,
-            EventTypes.LAMBDA_FUNCTION_URL,
-            EventTypes.ALB,
-        ]
-    ]
-    if str_event_source not in str_http_triggers:
+    if str_event_source not in _str_http_triggers:
         return
 
     status_code = "200"
diff --git a/datadog_lambda/version.py b/datadog_lambda/version.py
new file mode 100644
index 00000000..c7debb23
--- /dev/null
+++ b/datadog_lambda/version.py
@@ -0,0 +1 @@
+__version__ = "5.93.0"
diff --git a/datadog_lambda/wrapper.py b/datadog_lambda/wrapper.py
index 057f048c..ba31f2be 100644
--- a/datadog_lambda/wrapper.py
+++ b/datadog_lambda/wrapper.py
@@ -6,8 +6,8 @@
 import os
 import logging
 import traceback
+import ujson as json
 from importlib import import_module
-import json
 from time import time_ns
 
 from datadog_lambda.extension import should_use_extension, flush_extension
@@ -258,7 +258,9 @@ def _inject_authorizer_span_headers(self, request_id):
             injected_headers[Headers.Parent_Span_Finish_Time] = finish_time_ns
             if request_id is not None:
                 injected_headers[Headers.Authorizing_Request_Id] = request_id
-            datadog_data = base64.b64encode(json.dumps(injected_headers).encode()).decode()
+            datadog_data = base64.b64encode(
+                json.dumps(injected_headers, escape_forward_slashes=False).encode()
+            ).decode()
             self.response.setdefault("context", {})
             self.response["context"]["_datadog"] = datadog_data
 
@@ -381,9 +383,8 @@ def _after(self, event, context):
 
 
 def format_err_with_traceback(e):
-    return "Error {}. Traceback: {}".format(
-        e, traceback.format_exc().replace("\n", "\r")
-    )
+    tb = traceback.format_exc().replace("\n", "\r")
+    return f"Error {e}. Traceback: {tb}"
 
 
 datadog_lambda_wrapper = _LambdaDecorator
diff --git a/datadog_lambda/xray.py b/datadog_lambda/xray.py
index 88d108f5..002d13b1 100644
--- a/datadog_lambda/xray.py
+++ b/datadog_lambda/xray.py
@@ -1,47 +1,60 @@
 import os
 import logging
-import json
 import binascii
 import time
 import socket
+import ujson as json
 
 from datadog_lambda.constants import XrayDaemon, XraySubsegment, TraceContextSource
 
 logger = logging.getLogger(__name__)
 
 
-def get_xray_host_port(address):
-    if address == "":
-        logger.debug("X-Ray daemon env var not set, not sending sub-segment")
-        return None
-    parts = address.split(":")
-    if len(parts) <= 1:
-        logger.debug("X-Ray daemon env var not set, not sending sub-segment")
-        return None
-    port = int(parts[1])
-    host = parts[0]
-    return (host, port)
-
-
-def send(host_port_tuple, payload):
-    sock = None
-    try:
-        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
-        sock.setblocking(0)
-        sock.connect(host_port_tuple)
-        sock.send(payload.encode("utf-8"))
-    except Exception as e_send:
-        logger.error("Error occurred submitting to xray daemon: %s", str(e_send))
-    try:
-        sock.close()
-    except Exception as e_close:
-        logger.error("Error while closing the socket: %s", str(e_close))
+class Socket(object):
+    def __init__(self):
+        self.sock = None
+
+    @property
+    def host_port_tuple(self):
+        if not hasattr(self, "_host_port_tuple"):
+            self._host_port_tuple = self._get_xray_host_port(
+                os.environ.get(XrayDaemon.XRAY_DAEMON_ADDRESS, "")
+            )
+        return self._host_port_tuple
+
+    def send(self, payload):
+        if not self.sock:
+            self._connect()
+        try:
+            self.sock.send(payload.encode("utf-8"))
+        except Exception as e_send:
+            logger.error("Error occurred submitting to xray daemon: %s", e_send)
+
+    def _connect(self):
+        self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+        self.sock.setblocking(0)
+        self.sock.connect(self.host_port_tuple)
+
+    def _get_xray_host_port(self, address):
+        if address == "":
+            logger.debug("X-Ray daemon env var not set, not sending sub-segment")
+            return None
+        parts = address.split(":")
+        if len(parts) <= 1:
+            logger.debug("X-Ray daemon env var not set, not sending sub-segment")
+            return None
+        port = int(parts[1])
+        host = parts[0]
+        return (host, port)
+
+
+sock = Socket()
 
 
 def build_segment_payload(payload):
     if payload is None:
         return None
-    return '{"format": "json", "version": 1}' + "\n" + payload
+    return '{"format": "json", "version": 1}\n' + payload
 
 
 def parse_xray_header(raw_trace_id):
@@ -89,16 +102,14 @@ def build_segment(context, key, metadata):
                     key: metadata,
                 }
             },
-        }
+        },
+        escape_forward_slashes=False,
     )
     return segment
 
 
 def send_segment(key, metadata):
-    host_port_tuple = get_xray_host_port(
-        os.environ.get(XrayDaemon.XRAY_DAEMON_ADDRESS, "")
-    )
-    if host_port_tuple is None:
+    if sock.host_port_tuple is None:
         return None
     context = parse_xray_header(
         os.environ.get(XrayDaemon.XRAY_TRACE_ID_HEADER_NAME, "")
     )
@@ -115,4 +126,4 @@ def send_segment(key, metadata):
         return None
     segment = build_segment(context, key, metadata)
     segment_payload = build_segment_payload(segment)
-    send(host_port_tuple, segment_payload)
+    sock.send(segment_payload)
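The `Socket` class above can get away with connecting once and reusing the socket because UDP `connect` performs no handshake; it only pins the default destination for subsequent `send` calls. A standalone sketch of that behavior (port 2000 is the X-Ray daemon's default UDP port; the loopback address here is illustrative):

    import socket

    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.setblocking(0)
    sock.connect(("127.0.0.1", 2000))  # no packets sent; just sets the peer address
    for payload in (b"segment-1", b"segment-2"):
        sock.send(payload)  # the same file descriptor is reused on every call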
diff --git a/pyproject.toml b/pyproject.toml
index 3536347f..1db09ba4 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "datadog_lambda"
-version = "5.92.0"
+version = "5.93.0"
 description = "The Datadog AWS Lambda Library"
 authors = ["Datadog, Inc. "]
 license = "Apache-2.0"
@@ -28,15 +28,15 @@ python = ">=3.8.0,<4"
 datadog = ">=0.41.0,<1.0.0"
 wrapt = "^1.11.2"
 ddtrace = ">=2.7.2"
+ujson = ">=5.9.0"
 urllib3 = [
     {version = "<2.0.0", python = "<3.11", optional = true},
     {version = "<2.1.0", python = ">=3.11", optional = true},
 ]
-importlib_metadata = {version = "*", python = "<3.8"}
 boto3 = { version = "^1.28.0", optional = true }
-typing_extensions = {version = "^4.0", python = "<3.8"}
 requests = { version ="^2.22.0", optional = true }
 pytest = { version= "^8.0.0", optional = true }
+pytest-benchmark = { version = "^4.0", optional = true }
 flake8 = { version = "^5.0.4", optional = true }
 
@@ -45,9 +45,13 @@
 dev = [
     "boto3",
     "flake8",
     "pytest",
+    "pytest-benchmark",
     "requests",
 ]
 
 [build-system]
 requires = ["poetry-core>=1.0.0"]
 build-backend = "poetry.core.masonry.api"
+
+[tool.pytest.ini_options]
+addopts = "--benchmark-disable --benchmark-autosave"
diff --git a/scripts/check_layer_size.sh b/scripts/check_layer_size.sh
index 84752fa1..977c0283 100755
--- a/scripts/check_layer_size.sh
+++ b/scripts/check_layer_size.sh
@@ -7,10 +7,9 @@
 
 # Compares layer size to threshold, and fails if below that threshold
 
-# 7 mb size limit
 set -e
 
-MAX_LAYER_COMPRESSED_SIZE_KB=$(expr 4 \* 1024)
-MAX_LAYER_UNCOMPRESSED_SIZE_KB=$(expr 13 \* 1024)
+MAX_LAYER_COMPRESSED_SIZE_KB=$(expr 5 \* 1024)
+MAX_LAYER_UNCOMPRESSED_SIZE_KB=$(expr 12 \* 1024)
 
 
 LAYER_FILES_PREFIX="datadog_lambda_py"
diff --git a/scripts/publish_prod.sh b/scripts/publish_prod.sh
index f88ac5e2..e138e3d8 100755
--- a/scripts/publish_prod.sh
+++ b/scripts/publish_prod.sh
@@ -53,10 +53,11 @@ if [ "$CONT" != "y" ]; then
     echo "Skipping updating package.json version"
 else
     echo
-    echo "Replacing version in pyproject.toml"
+    echo "Replacing version in pyproject.toml and datadog_lambda/version.py"
     echo
     poetry version ${NEW_VERSION}
+    echo "__version__ = \"${NEW_VERSION}\"" > datadog_lambda/version.py
 fi
 
 echo
@@ -85,11 +86,11 @@ if [ "$CONT" != "y" ]; then
 else
     echo "Ensuring you have access to the AWS GovCloud account"
     ddsaml2aws login -a govcloud-us1-fed-human-engineering
-    AWS_PROFILE=govcloud-us1-fed-human-engineering aws sts get-caller-identity
+    aws-vault exec sso-govcloud-us1-fed-engineering -- aws sts get-caller-identity
 
     echo "Publishing layers to GovCloud AWS regions"
     ddsaml2aws login -a govcloud-us1-fed-human-engineering
-    VERSION=$LAYER_VERSION AWS_PROFILE=govcloud-us1-fed-human-engineering ./scripts/publish_layers.sh
+    VERSION=$LAYER_VERSION aws-vault exec sso-govcloud-us1-fed-engineering -- ./scripts/publish_layers.sh
 fi
 
 echo "Answer 'n' if GitLab already did this"
@@ -110,7 +111,7 @@ if [ "$CONT" != "y" ]; then
 else
     echo
     echo 'Publishing updates to github'
-    git commit pyproject.toml -m "Bump version to ${NEW_VERSION}"
+    git commit pyproject.toml datadog_lambda/version.py -m "Bump version to ${NEW_VERSION}"
     git push origin main
     git tag "v$LAYER_VERSION"
     git push origin "refs/tags/v$LAYER_VERSION"
diff --git a/tests/event_samples/api-gateway-v1-parametrized.json b/tests/event_samples/api-gateway-v1-parametrized.json
new file mode 100644
index 00000000..65527ccb
--- /dev/null
+++ b/tests/event_samples/api-gateway-v1-parametrized.json
@@ -0,0 +1,111 @@
+{
+  "resource": "/user/{id}",
+  "path": "/user/42",
+  "httpMethod": "GET",
+  "headers": {
+    "Accept": "*/*",
+    "CloudFront-Forwarded-Proto": "https",
+    "CloudFront-Is-Desktop-Viewer": "true",
+    "CloudFront-Is-Mobile-Viewer": "false",
+    "CloudFront-Is-SmartTV-Viewer": "false",
+    "CloudFront-Is-Tablet-Viewer": "false",
+    "CloudFront-Viewer-ASN": "7922",
+    "CloudFront-Viewer-Country": "US",
+    "Host": "mcwkra0ya4.execute-api.sa-east-1.amazonaws.com",
+    "User-Agent": "curl/8.1.2",
+    "Via": "2.0 xxx.cloudfront.net (CloudFront)",
+    "X-Amz-Cf-Id": "Tz3yUVcJkwOhQGqZgKTzrEHqAoOd8ZprYAHpg2S6BNxdd-Ym79pb6g==",
+    "X-Amzn-Trace-Id": "Root=1-65f49d20-7ba106216238dd0078a5db31",
+    "X-Forwarded-For": "76.115.124.192, 15.158.54.119",
+    "X-Forwarded-Port": "443",
+    "X-Forwarded-Proto": "https"
+  },
+  "multiValueHeaders": {
+    "Accept": [
+      "*/*"
+    ],
+    "CloudFront-Forwarded-Proto": [
+      "https"
+    ],
+    "CloudFront-Is-Desktop-Viewer": [
+      "true"
+    ],
+    "CloudFront-Is-Mobile-Viewer": [
+      "false"
+    ],
+    "CloudFront-Is-SmartTV-Viewer": [
+      "false"
+    ],
+    "CloudFront-Is-Tablet-Viewer": [
+      "false"
+    ],
+    "CloudFront-Viewer-ASN": [
+      "7922"
+    ],
+    "CloudFront-Viewer-Country": [
+      "US"
+    ],
+    "Host": [
+      "mcwkra0ya4.execute-api.sa-east-1.amazonaws.com"
+    ],
+    "User-Agent": [
+      "curl/8.1.2"
+    ],
+    "Via": [
+      "2.0 xxx.cloudfront.net (CloudFront)"
+    ],
+    "X-Amz-Cf-Id": [
+      "Tz3yUVcJkwOhQGqZgKTzrEHqAoOd8ZprYAHpg2S6BNxdd-Ym79pb6g=="
+    ],
+    "X-Amzn-Trace-Id": [
+      "Root=1-65f49d20-7ba106216238dd0078a5db31"
+    ],
+    "X-Forwarded-For": [
+      "76.115.124.192, 15.158.54.119"
+    ],
+    "X-Forwarded-Port": [
+      "443"
+    ],
+    "X-Forwarded-Proto": [
+      "https"
+    ]
+  },
+  "queryStringParameters": null,
+  "multiValueQueryStringParameters": null,
+  "pathParameters": {
+    "id": "42"
+  },
+  "stageVariables": null,
+  "requestContext": {
+    "resourceId": "ojg3nk",
+    "resourcePath": "/user/{id}",
+    "httpMethod": "GET",
+    "extendedRequestId": "Ur19IHYDmjQEU5A=",
+    "requestTime": "15/Mar/2024:19:10:24 +0000",
+    "path": "/dev/user/42",
+    "accountId": "425362996713",
+    "protocol": "HTTP/1.1",
+    "stage": "dev",
+    "domainPrefix": "mcwkra0ya4",
+    "requestTimeEpoch": 1710529824520,
+    "requestId": "e16399f7-e984-463a-9931-745ba021a27f",
+    "identity": {
+      "cognitoIdentityPoolId": null,
+      "accountId": null,
+      "cognitoIdentityId": null,
+      "caller": null,
+      "sourceIp": "76.115.124.192",
+      "principalOrgId": null,
+      "accessKey": null,
+      "cognitoAuthenticationType": null,
+      "cognitoAuthenticationProvider": null,
+      "userArn": null,
+      "userAgent": "curl/8.1.2",
+      "user": null
+    },
+    "domainName": "mcwkra0ya4.execute-api.sa-east-1.amazonaws.com",
+    "apiId": "mcwkra0ya4"
+  },
+  "body": null,
+  "isBase64Encoded": false
+}
diff --git a/tests/event_samples/api-gateway-v2-parametrized.json b/tests/event_samples/api-gateway-v2-parametrized.json
new file mode 100644
index 00000000..89ff72b9
--- /dev/null
+++ b/tests/event_samples/api-gateway-v2-parametrized.json
@@ -0,0 +1,38 @@
+{
+  "version": "2.0",
+  "routeKey": "GET /user/{id}",
+  "rawPath": "/user/42",
+  "rawQueryString": "",
+  "headers": {
+    "accept": "*/*",
+    "content-length": "0",
+    "host": "9vj54we5ih.execute-api.sa-east-1.amazonaws.com",
+    "user-agent": "curl/8.1.2",
+    "x-amzn-trace-id": "Root=1-65f49d71-505edb3b69b8abd513cfa08b",
+    "x-forwarded-for": "76.115.124.192",
+    "x-forwarded-port": "443",
+    "x-forwarded-proto": "https"
+  },
+  "requestContext": {
+    "accountId": "425362996713",
+    "apiId": "9vj54we5ih",
+    "domainName": "9vj54we5ih.execute-api.sa-east-1.amazonaws.com",
+    "domainPrefix": "9vj54we5ih",
+    "http": {
+      "method": "GET",
+      "path": "/user/42",
+      "protocol": "HTTP/1.1",
+      "sourceIp": "76.115.124.192",
+      "userAgent": "curl/8.1.2"
+    },
+    "requestId": "Ur2JtjEfGjQEPOg=",
+    "routeKey": "GET /user/{id}",
+    "stage": "$default",
+    "time": "15/Mar/2024:19:11:45 +0000",
+    "timeEpoch": 1710529905066
+  },
+  "pathParameters": {
+    "id": "42"
+  },
+  "isBase64Encoded": false
+}
diff --git a/tests/integration/snapshots/logs/async-metrics_python310.log b/tests/integration/snapshots/logs/async-metrics_python310.log
index e2101885..bec8aa8a 100644
--- a/tests/integration/snapshots/logs/async-metrics_python310.log
+++ b/tests/integration/snapshots/logs/async-metrics_python310.log
@@ -67,11 +67,11 @@ HTTP GET https://www.datadoghq.com/ Headers: ["Accept-Encoding:gzip, deflate", "
         "_inferred_span.tag_source": "self",
         "http.status_code": "200",
         "peer.service": "integration-tests-python",
+        "_dd.peer.service.source": "peer.service",
+        "_dd.base_service": "integration-tests-python",
         "_dd.p.dm": "-0",
         "_dd.p.tid": "XXXX",
-        "language": "python",
-        "_dd.peer.service.source": "peer.service",
-        "_dd.base_service": "integration-tests-python"
+        "language": "python"
       },
       "metrics": {
         "process_id": XXXX,
@@ -237,16 +237,16 @@ HTTP GET https://www.datadoghq.com/ Headers: ["Accept-Encoding:gzip, deflate", "
         "_inferred_span.synchronicity": "async",
         "_inferred_span.tag_source": "self",
         "peer.service": "integration-tests-python",
+        "_dd.peer.service.source": "peer.service",
+        "_dd.base_service": "integration-tests-python",
         "_dd.p.dm": "-0",
         "_dd.p.tid": "XXXX",
-        "language": "python",
-        "_dd.peer.service.source": "peer.service",
-        "_dd.base_service": "integration-tests-python"
+        "language": "python"
       },
       "metrics": {
         "process_id": XXXX,
         "_dd.top_level": 1,
-        "_sampling_priority_v1": 1
+        "_sampling_priority_v1": -1
       },
       "type": "web"
     },
@@ -397,11 +397,11 @@ HTTP GET https://www.datadoghq.com/ Headers: ["Accept-Encoding:gzip, deflate", "
         "_inferred_span.synchronicity": "async",
         "_inferred_span.tag_source": "self",
         "peer.service": "integration-tests-python",
+        "_dd.peer.service.source": "peer.service",
+        "_dd.base_service": "integration-tests-python",
         "_dd.p.dm": "-0",
         "_dd.p.tid": "XXXX",
-        "language": "python",
-        "_dd.peer.service.source": "peer.service",
-        "_dd.base_service": "integration-tests-python"
+        "language": "python"
       },
       "metrics": {
         "process_id": XXXX,
@@ -566,16 +566,16 @@ HTTP GET https://www.datadoghq.com/ Headers: ["Accept-Encoding:gzip, deflate", "
         "_inferred_span.tag_source": "self",
         "http.status_code": "200",
         "peer.service": "integration-tests-python",
+        "_dd.peer.service.source": "peer.service",
+        "_dd.base_service": "integration-tests-python",
         "_dd.p.dm": "-0",
         "_dd.p.tid": "XXXX",
-        "language": "python",
-        "_dd.peer.service.source": "peer.service",
-        "_dd.base_service": "integration-tests-python"
+        "language": "python"
       },
       "metrics": {
         "process_id": XXXX,
         "_dd.top_level": 1,
-        "_sampling_priority_v1": 1
+        "_sampling_priority_v1": -1
       },
       "type": "http"
     },
@@ -736,16 +736,16 @@ HTTP GET https://www.datadoghq.com/ Headers: ["Accept-Encoding:gzip, deflate", "
         "_inferred_span.synchronicity": "async",
         "_inferred_span.tag_source": "self",
         "peer.service": "integration-tests-python",
+        "_dd.peer.service.source": "peer.service",
+        "_dd.base_service": "integration-tests-python",
         "_dd.p.dm": "-0",
         "_dd.p.tid": "XXXX",
-        "language": "python",
-        "_dd.peer.service.source": "peer.service",
-        "_dd.base_service": "integration-tests-python"
+        "language": "python"
       },
       "metrics": {
         "process_id": XXXX,
         "_dd.top_level": 1,
-        "_sampling_priority_v1": 1
+        "_sampling_priority_v1": -1
       },
       "type": "web"
     },
@@ -899,16 +899,16 @@ HTTP GET https://www.datadoghq.com/ Headers: ["Accept-Encoding:gzip, deflate", "
         "_inferred_span.synchronicity": "async",
         "_inferred_span.tag_source": "self",
         "peer.service": "integration-tests-python",
+        "_dd.peer.service.source": "peer.service",
+        "_dd.base_service": "integration-tests-python",
         "_dd.p.dm": "-0",
         "_dd.p.tid": "XXXX",
-        "language": "python",
-        "_dd.peer.service.source": "peer.service",
-        "_dd.base_service": "integration-tests-python"
+        "language": "python"
       },
       "metrics": {
         "process_id": XXXX,
         "_dd.top_level": 1,
-        "_sampling_priority_v1": 1
+        "_sampling_priority_v1": -1
       },
       "type": "web"
     },
@@ -1063,16 +1063,16 @@ HTTP GET https://www.datadoghq.com/ Headers: ["Accept-Encoding:gzip, deflate", "
         "_inferred_span.synchronicity": "async",
         "_inferred_span.tag_source": "self",
         "peer.service": "integration-tests-python",
+        "_dd.peer.service.source": "peer.service",
+        "_dd.base_service": "integration-tests-python",
         "_dd.p.dm": "-0",
         "_dd.p.tid": "XXXX",
-        "language": "python",
-        "_dd.peer.service.source": "peer.service",
-        "_dd.base_service": "integration-tests-python"
+        "language": "python"
       },
       "metrics": {
         "process_id": XXXX,
         "_dd.top_level": 1,
-        "_sampling_priority_v1": 1
+        "_sampling_priority_v1": -1
       },
       "type": "web"
     },
@@ -1226,16 +1226,16 @@ HTTP GET https://www.datadoghq.com/ Headers: ["Accept-Encoding:gzip, deflate", "
         "_inferred_span.synchronicity": "async",
         "_inferred_span.tag_source": "self",
         "peer.service": "integration-tests-python",
+        "_dd.peer.service.source": "peer.service",
+        "_dd.base_service": "integration-tests-python",
         "_dd.p.dm": "-0",
         "_dd.p.tid": "XXXX",
-        "language": "python",
-        "_dd.peer.service.source": "peer.service",
-        "_dd.base_service": "integration-tests-python"
+        "language": "python"
      },
      "metrics": {
        "process_id": XXXX,
        "_dd.top_level": 1,
-        "_sampling_priority_v1": 1
+        "_sampling_priority_v1": -1
      },
      "type": "web"
    },
@@ -1395,16 +1395,16 @@ HTTP GET https://www.datadoghq.com/ Headers: ["Accept-Encoding:gzip, deflate", "
         "_inferred_span.tag_source": "self",
         "http.status_code": "200",
         "peer.service": "integration-tests-python",
+        "_dd.peer.service.source": "peer.service",
+        "_dd.base_service": "integration-tests-python",
         "_dd.p.dm": "-0",
         "_dd.p.tid": "XXXX",
-        "language": "python",
-        "_dd.peer.service.source": "peer.service",
-        "_dd.base_service": "integration-tests-python"
+        "language": "python"
       },
       "metrics": {
         "process_id": XXXX,
         "_dd.top_level": 1,
-        "_sampling_priority_v1": 1
+        "_sampling_priority_v1": -1
       },
       "type": "web"
     },
diff --git a/tests/integration/snapshots/logs/async-metrics_python311.log b/tests/integration/snapshots/logs/async-metrics_python311.log
index 8d5419c1..1e6a384d 100644
--- a/tests/integration/snapshots/logs/async-metrics_python311.log
+++ b/tests/integration/snapshots/logs/async-metrics_python311.log
@@ -67,11 +67,11 @@ HTTP GET https://www.datadoghq.com/ Headers: ["Accept-Encoding:gzip, deflate", "
         "_inferred_span.tag_source": "self",
         "http.status_code": "200",
         "peer.service": "integration-tests-python",
+        "_dd.peer.service.source": "peer.service",
+        "_dd.base_service": "integration-tests-python",
         "_dd.p.dm": "-0",
         "_dd.p.tid": "XXXX",
-        "language": "python",
-        "_dd.peer.service.source": "peer.service",
-        "_dd.base_service": "integration-tests-python"
+        "language": "python"
       },
       "metrics": {
         "process_id": XXXX,
@@ -237,16 +237,16 @@ HTTP GET https://www.datadoghq.com/ Headers: ["Accept-Encoding:gzip, deflate", "
         "_inferred_span.synchronicity": "async",
         "_inferred_span.tag_source": "self",
         "peer.service": "integration-tests-python",
+        "_dd.peer.service.source": "peer.service",
+        "_dd.base_service": "integration-tests-python",
         "_dd.p.dm": "-0",
         "_dd.p.tid": "XXXX",
-        "language": "python",
-        "_dd.peer.service.source": "peer.service",
-        "_dd.base_service": "integration-tests-python"
+        "language": "python"
       },
       "metrics": {
         "process_id": XXXX,
         "_dd.top_level": 1,
-        "_sampling_priority_v1": 1
+        "_sampling_priority_v1": -1
       },
       "type": "web"
     },
@@ -397,11 +397,11 @@ HTTP GET https://www.datadoghq.com/ Headers: ["Accept-Encoding:gzip, deflate", "
         "_inferred_span.synchronicity": "async",
         "_inferred_span.tag_source": "self",
         "peer.service": "integration-tests-python",
+        "_dd.peer.service.source": "peer.service",
+        "_dd.base_service": "integration-tests-python",
         "_dd.p.dm": "-0",
         "_dd.p.tid": "XXXX",
-        "language": "python",
-        "_dd.peer.service.source": "peer.service",
-        "_dd.base_service": "integration-tests-python"
+        "language": "python"
       },
       "metrics": {
         "process_id": XXXX,
@@ -566,16 +566,16 @@ HTTP GET https://www.datadoghq.com/ Headers: ["Accept-Encoding:gzip, deflate", "
         "_inferred_span.tag_source": "self",
         "http.status_code": "200",
         "peer.service": "integration-tests-python",
+        "_dd.peer.service.source": "peer.service",
+        "_dd.base_service": "integration-tests-python",
         "_dd.p.dm": "-0",
         "_dd.p.tid": "XXXX",
-        "language": "python",
-        "_dd.peer.service.source": "peer.service",
-        "_dd.base_service": "integration-tests-python"
+        "language": "python"
       },
       "metrics": {
         "process_id": XXXX,
         "_dd.top_level": 1,
-        "_sampling_priority_v1": 1
+        "_sampling_priority_v1": -1
       },
       "type": "http"
     },
@@ -708,6 +708,7 @@ START
 }
 HTTP GET https://datadoghq.com/ Headers: ["Accept-Encoding:gzip, deflate", "Accept:*/*", "Connection:keep-alive", "User-Agent:python-requests/X.X.X", "traceparent:XXX", "tracestate:XXX
 HTTP GET https://www.datadoghq.com/ Headers: ["Accept-Encoding:gzip, deflate", "Accept:*/*", "Connection:keep-alive", "User-Agent:python-requests/X.X.X", "traceparent:XXX", "tracestate:XXX
+END Duration: XXXX ms Memory Used: XXXX MB
 {
   "traces": [
     [
@@ -736,16 +737,16 @@ HTTP GET https://www.datadoghq.com/ Headers: ["Accept-Encoding:gzip, deflate", "
         "_inferred_span.synchronicity": "async",
         "_inferred_span.tag_source": "self",
         "peer.service": "integration-tests-python",
+        "_dd.peer.service.source": 
"peer.service", + "_dd.base_service": "integration-tests-python", "_dd.p.dm": "-0", "_dd.p.tid": "XXXX", - "language": "python", - "_dd.peer.service.source": "peer.service", - "_dd.base_service": "integration-tests-python" + "language": "python" }, "metrics": { "process_id": XXXX, "_dd.top_level": 1, - "_sampling_priority_v1": 1 + "_sampling_priority_v1": -1 }, "type": "web" }, @@ -834,7 +835,6 @@ HTTP GET https://www.datadoghq.com/ Headers: ["Accept-Encoding:gzip, deflate", " ] ] } -END Duration: XXXX ms Memory Used: XXXX MB START { "m": "aws.lambda.enhanced.invocations", @@ -899,16 +899,16 @@ HTTP GET https://www.datadoghq.com/ Headers: ["Accept-Encoding:gzip, deflate", " "_inferred_span.synchronicity": "async", "_inferred_span.tag_source": "self", "peer.service": "integration-tests-python", + "_dd.peer.service.source": "peer.service", + "_dd.base_service": "integration-tests-python", "_dd.p.dm": "-0", "_dd.p.tid": "XXXX", - "language": "python", - "_dd.peer.service.source": "peer.service", - "_dd.base_service": "integration-tests-python" + "language": "python" }, "metrics": { "process_id": XXXX, "_dd.top_level": 1, - "_sampling_priority_v1": 1 + "_sampling_priority_v1": -1 }, "type": "web" }, @@ -1063,16 +1063,16 @@ HTTP GET https://www.datadoghq.com/ Headers: ["Accept-Encoding:gzip, deflate", " "_inferred_span.synchronicity": "async", "_inferred_span.tag_source": "self", "peer.service": "integration-tests-python", + "_dd.peer.service.source": "peer.service", + "_dd.base_service": "integration-tests-python", "_dd.p.dm": "-0", "_dd.p.tid": "XXXX", - "language": "python", - "_dd.peer.service.source": "peer.service", - "_dd.base_service": "integration-tests-python" + "language": "python" }, "metrics": { "process_id": XXXX, "_dd.top_level": 1, - "_sampling_priority_v1": 1 + "_sampling_priority_v1": -1 }, "type": "web" }, @@ -1226,16 +1226,16 @@ HTTP GET https://www.datadoghq.com/ Headers: ["Accept-Encoding:gzip, deflate", " "_inferred_span.synchronicity": "async", "_inferred_span.tag_source": "self", "peer.service": "integration-tests-python", + "_dd.peer.service.source": "peer.service", + "_dd.base_service": "integration-tests-python", "_dd.p.dm": "-0", "_dd.p.tid": "XXXX", - "language": "python", - "_dd.peer.service.source": "peer.service", - "_dd.base_service": "integration-tests-python" + "language": "python" }, "metrics": { "process_id": XXXX, "_dd.top_level": 1, - "_sampling_priority_v1": 1 + "_sampling_priority_v1": -1 }, "type": "web" }, @@ -1395,16 +1395,16 @@ HTTP GET https://www.datadoghq.com/ Headers: ["Accept-Encoding:gzip, deflate", " "_inferred_span.tag_source": "self", "http.status_code": "200", "peer.service": "integration-tests-python", + "_dd.peer.service.source": "peer.service", + "_dd.base_service": "integration-tests-python", "_dd.p.dm": "-0", "_dd.p.tid": "XXXX", - "language": "python", - "_dd.peer.service.source": "peer.service", - "_dd.base_service": "integration-tests-python" + "language": "python" }, "metrics": { "process_id": XXXX, "_dd.top_level": 1, - "_sampling_priority_v1": 1 + "_sampling_priority_v1": -1 }, "type": "web" }, diff --git a/tests/integration/snapshots/logs/async-metrics_python312.log b/tests/integration/snapshots/logs/async-metrics_python312.log index 0132e54e..fdf90e98 100644 --- a/tests/integration/snapshots/logs/async-metrics_python312.log +++ b/tests/integration/snapshots/logs/async-metrics_python312.log @@ -67,11 +67,11 @@ HTTP GET https://www.datadoghq.com/ Headers: ["Accept-Encoding:gzip, deflate", " "_inferred_span.tag_source": "self", 
"http.status_code": "200", "peer.service": "integration-tests-python", + "_dd.peer.service.source": "peer.service", + "_dd.base_service": "integration-tests-python", "_dd.p.dm": "-0", "_dd.p.tid": "XXXX", - "language": "python", - "_dd.peer.service.source": "peer.service", - "_dd.base_service": "integration-tests-python" + "language": "python" }, "metrics": { "process_id": XXXX, @@ -237,16 +237,16 @@ HTTP GET https://www.datadoghq.com/ Headers: ["Accept-Encoding:gzip, deflate", " "_inferred_span.synchronicity": "async", "_inferred_span.tag_source": "self", "peer.service": "integration-tests-python", + "_dd.peer.service.source": "peer.service", + "_dd.base_service": "integration-tests-python", "_dd.p.dm": "-0", "_dd.p.tid": "XXXX", - "language": "python", - "_dd.peer.service.source": "peer.service", - "_dd.base_service": "integration-tests-python" + "language": "python" }, "metrics": { "process_id": XXXX, "_dd.top_level": 1, - "_sampling_priority_v1": 1 + "_sampling_priority_v1": -1 }, "type": "web" }, @@ -397,11 +397,11 @@ HTTP GET https://www.datadoghq.com/ Headers: ["Accept-Encoding:gzip, deflate", " "_inferred_span.synchronicity": "async", "_inferred_span.tag_source": "self", "peer.service": "integration-tests-python", + "_dd.peer.service.source": "peer.service", + "_dd.base_service": "integration-tests-python", "_dd.p.dm": "-0", "_dd.p.tid": "XXXX", - "language": "python", - "_dd.peer.service.source": "peer.service", - "_dd.base_service": "integration-tests-python" + "language": "python" }, "metrics": { "process_id": XXXX, @@ -566,16 +566,16 @@ HTTP GET https://www.datadoghq.com/ Headers: ["Accept-Encoding:gzip, deflate", " "_inferred_span.tag_source": "self", "http.status_code": "200", "peer.service": "integration-tests-python", + "_dd.peer.service.source": "peer.service", + "_dd.base_service": "integration-tests-python", "_dd.p.dm": "-0", "_dd.p.tid": "XXXX", - "language": "python", - "_dd.peer.service.source": "peer.service", - "_dd.base_service": "integration-tests-python" + "language": "python" }, "metrics": { "process_id": XXXX, "_dd.top_level": 1, - "_sampling_priority_v1": 1 + "_sampling_priority_v1": -1 }, "type": "http" }, @@ -736,16 +736,16 @@ HTTP GET https://www.datadoghq.com/ Headers: ["Accept-Encoding:gzip, deflate", " "_inferred_span.synchronicity": "async", "_inferred_span.tag_source": "self", "peer.service": "integration-tests-python", + "_dd.peer.service.source": "peer.service", + "_dd.base_service": "integration-tests-python", "_dd.p.dm": "-0", "_dd.p.tid": "XXXX", - "language": "python", - "_dd.peer.service.source": "peer.service", - "_dd.base_service": "integration-tests-python" + "language": "python" }, "metrics": { "process_id": XXXX, "_dd.top_level": 1, - "_sampling_priority_v1": 1 + "_sampling_priority_v1": -1 }, "type": "web" }, @@ -899,16 +899,16 @@ HTTP GET https://www.datadoghq.com/ Headers: ["Accept-Encoding:gzip, deflate", " "_inferred_span.synchronicity": "async", "_inferred_span.tag_source": "self", "peer.service": "integration-tests-python", + "_dd.peer.service.source": "peer.service", + "_dd.base_service": "integration-tests-python", "_dd.p.dm": "-0", "_dd.p.tid": "XXXX", - "language": "python", - "_dd.peer.service.source": "peer.service", - "_dd.base_service": "integration-tests-python" + "language": "python" }, "metrics": { "process_id": XXXX, "_dd.top_level": 1, - "_sampling_priority_v1": 1 + "_sampling_priority_v1": -1 }, "type": "web" }, @@ -1063,16 +1063,16 @@ HTTP GET https://www.datadoghq.com/ Headers: ["Accept-Encoding:gzip, deflate", " 
"_inferred_span.synchronicity": "async", "_inferred_span.tag_source": "self", "peer.service": "integration-tests-python", + "_dd.peer.service.source": "peer.service", + "_dd.base_service": "integration-tests-python", "_dd.p.dm": "-0", "_dd.p.tid": "XXXX", - "language": "python", - "_dd.peer.service.source": "peer.service", - "_dd.base_service": "integration-tests-python" + "language": "python" }, "metrics": { "process_id": XXXX, "_dd.top_level": 1, - "_sampling_priority_v1": 1 + "_sampling_priority_v1": -1 }, "type": "web" }, @@ -1226,16 +1226,16 @@ HTTP GET https://www.datadoghq.com/ Headers: ["Accept-Encoding:gzip, deflate", " "_inferred_span.synchronicity": "async", "_inferred_span.tag_source": "self", "peer.service": "integration-tests-python", + "_dd.peer.service.source": "peer.service", + "_dd.base_service": "integration-tests-python", "_dd.p.dm": "-0", "_dd.p.tid": "XXXX", - "language": "python", - "_dd.peer.service.source": "peer.service", - "_dd.base_service": "integration-tests-python" + "language": "python" }, "metrics": { "process_id": XXXX, "_dd.top_level": 1, - "_sampling_priority_v1": 1 + "_sampling_priority_v1": -1 }, "type": "web" }, @@ -1395,16 +1395,16 @@ HTTP GET https://www.datadoghq.com/ Headers: ["Accept-Encoding:gzip, deflate", " "_inferred_span.tag_source": "self", "http.status_code": "200", "peer.service": "integration-tests-python", + "_dd.peer.service.source": "peer.service", + "_dd.base_service": "integration-tests-python", "_dd.p.dm": "-0", "_dd.p.tid": "XXXX", - "language": "python", - "_dd.peer.service.source": "peer.service", - "_dd.base_service": "integration-tests-python" + "language": "python" }, "metrics": { "process_id": XXXX, "_dd.top_level": 1, - "_sampling_priority_v1": 1 + "_sampling_priority_v1": -1 }, "type": "web" }, diff --git a/tests/integration/snapshots/logs/async-metrics_python38.log b/tests/integration/snapshots/logs/async-metrics_python38.log index e5b755ac..50a9f700 100644 --- a/tests/integration/snapshots/logs/async-metrics_python38.log +++ b/tests/integration/snapshots/logs/async-metrics_python38.log @@ -67,11 +67,11 @@ HTTP GET https://www.datadoghq.com/ Headers: ["Accept-Encoding:gzip, deflate", " "_inferred_span.tag_source": "self", "http.status_code": "200", "peer.service": "integration-tests-python", + "_dd.peer.service.source": "peer.service", + "_dd.base_service": "integration-tests-python", "_dd.p.dm": "-0", "_dd.p.tid": "XXXX", - "language": "python", - "_dd.peer.service.source": "peer.service", - "_dd.base_service": "integration-tests-python" + "language": "python" }, "metrics": { "process_id": XXXX, @@ -237,16 +237,16 @@ HTTP GET https://www.datadoghq.com/ Headers: ["Accept-Encoding:gzip, deflate", " "_inferred_span.synchronicity": "async", "_inferred_span.tag_source": "self", "peer.service": "integration-tests-python", + "_dd.peer.service.source": "peer.service", + "_dd.base_service": "integration-tests-python", "_dd.p.dm": "-0", "_dd.p.tid": "XXXX", - "language": "python", - "_dd.peer.service.source": "peer.service", - "_dd.base_service": "integration-tests-python" + "language": "python" }, "metrics": { "process_id": XXXX, "_dd.top_level": 1, - "_sampling_priority_v1": 1 + "_sampling_priority_v1": -1 }, "type": "web" }, @@ -397,11 +397,11 @@ HTTP GET https://www.datadoghq.com/ Headers: ["Accept-Encoding:gzip, deflate", " "_inferred_span.synchronicity": "async", "_inferred_span.tag_source": "self", "peer.service": "integration-tests-python", + "_dd.peer.service.source": "peer.service", + "_dd.base_service": 
"integration-tests-python", "_dd.p.dm": "-0", "_dd.p.tid": "XXXX", - "language": "python", - "_dd.peer.service.source": "peer.service", - "_dd.base_service": "integration-tests-python" + "language": "python" }, "metrics": { "process_id": XXXX, @@ -566,16 +566,16 @@ HTTP GET https://www.datadoghq.com/ Headers: ["Accept-Encoding:gzip, deflate", " "_inferred_span.tag_source": "self", "http.status_code": "200", "peer.service": "integration-tests-python", + "_dd.peer.service.source": "peer.service", + "_dd.base_service": "integration-tests-python", "_dd.p.dm": "-0", "_dd.p.tid": "XXXX", - "language": "python", - "_dd.peer.service.source": "peer.service", - "_dd.base_service": "integration-tests-python" + "language": "python" }, "metrics": { "process_id": XXXX, "_dd.top_level": 1, - "_sampling_priority_v1": 1 + "_sampling_priority_v1": -1 }, "type": "http" }, @@ -736,16 +736,16 @@ HTTP GET https://www.datadoghq.com/ Headers: ["Accept-Encoding:gzip, deflate", " "_inferred_span.synchronicity": "async", "_inferred_span.tag_source": "self", "peer.service": "integration-tests-python", + "_dd.peer.service.source": "peer.service", + "_dd.base_service": "integration-tests-python", "_dd.p.dm": "-0", "_dd.p.tid": "XXXX", - "language": "python", - "_dd.peer.service.source": "peer.service", - "_dd.base_service": "integration-tests-python" + "language": "python" }, "metrics": { "process_id": XXXX, "_dd.top_level": 1, - "_sampling_priority_v1": 1 + "_sampling_priority_v1": -1 }, "type": "web" }, @@ -899,16 +899,16 @@ HTTP GET https://www.datadoghq.com/ Headers: ["Accept-Encoding:gzip, deflate", " "_inferred_span.synchronicity": "async", "_inferred_span.tag_source": "self", "peer.service": "integration-tests-python", + "_dd.peer.service.source": "peer.service", + "_dd.base_service": "integration-tests-python", "_dd.p.dm": "-0", "_dd.p.tid": "XXXX", - "language": "python", - "_dd.peer.service.source": "peer.service", - "_dd.base_service": "integration-tests-python" + "language": "python" }, "metrics": { "process_id": XXXX, "_dd.top_level": 1, - "_sampling_priority_v1": 1 + "_sampling_priority_v1": -1 }, "type": "web" }, @@ -1063,16 +1063,16 @@ HTTP GET https://www.datadoghq.com/ Headers: ["Accept-Encoding:gzip, deflate", " "_inferred_span.synchronicity": "async", "_inferred_span.tag_source": "self", "peer.service": "integration-tests-python", + "_dd.peer.service.source": "peer.service", + "_dd.base_service": "integration-tests-python", "_dd.p.dm": "-0", "_dd.p.tid": "XXXX", - "language": "python", - "_dd.peer.service.source": "peer.service", - "_dd.base_service": "integration-tests-python" + "language": "python" }, "metrics": { "process_id": XXXX, "_dd.top_level": 1, - "_sampling_priority_v1": 1 + "_sampling_priority_v1": -1 }, "type": "web" }, @@ -1226,16 +1226,16 @@ HTTP GET https://www.datadoghq.com/ Headers: ["Accept-Encoding:gzip, deflate", " "_inferred_span.synchronicity": "async", "_inferred_span.tag_source": "self", "peer.service": "integration-tests-python", + "_dd.peer.service.source": "peer.service", + "_dd.base_service": "integration-tests-python", "_dd.p.dm": "-0", "_dd.p.tid": "XXXX", - "language": "python", - "_dd.peer.service.source": "peer.service", - "_dd.base_service": "integration-tests-python" + "language": "python" }, "metrics": { "process_id": XXXX, "_dd.top_level": 1, - "_sampling_priority_v1": 1 + "_sampling_priority_v1": -1 }, "type": "web" }, @@ -1395,16 +1395,16 @@ HTTP GET https://www.datadoghq.com/ Headers: ["Accept-Encoding:gzip, deflate", " "_inferred_span.tag_source": "self", 
"http.status_code": "200", "peer.service": "integration-tests-python", + "_dd.peer.service.source": "peer.service", + "_dd.base_service": "integration-tests-python", "_dd.p.dm": "-0", "_dd.p.tid": "XXXX", - "language": "python", - "_dd.peer.service.source": "peer.service", - "_dd.base_service": "integration-tests-python" + "language": "python" }, "metrics": { "process_id": XXXX, "_dd.top_level": 1, - "_sampling_priority_v1": 1 + "_sampling_priority_v1": -1 }, "type": "web" }, diff --git a/tests/integration/snapshots/logs/async-metrics_python39.log b/tests/integration/snapshots/logs/async-metrics_python39.log index d02e097d..d411dd1e 100644 --- a/tests/integration/snapshots/logs/async-metrics_python39.log +++ b/tests/integration/snapshots/logs/async-metrics_python39.log @@ -67,11 +67,11 @@ HTTP GET https://www.datadoghq.com/ Headers: ["Accept-Encoding:gzip, deflate", " "_inferred_span.tag_source": "self", "http.status_code": "200", "peer.service": "integration-tests-python", + "_dd.peer.service.source": "peer.service", + "_dd.base_service": "integration-tests-python", "_dd.p.dm": "-0", "_dd.p.tid": "XXXX", - "language": "python", - "_dd.peer.service.source": "peer.service", - "_dd.base_service": "integration-tests-python" + "language": "python" }, "metrics": { "process_id": XXXX, @@ -237,16 +237,16 @@ HTTP GET https://www.datadoghq.com/ Headers: ["Accept-Encoding:gzip, deflate", " "_inferred_span.synchronicity": "async", "_inferred_span.tag_source": "self", "peer.service": "integration-tests-python", + "_dd.peer.service.source": "peer.service", + "_dd.base_service": "integration-tests-python", "_dd.p.dm": "-0", "_dd.p.tid": "XXXX", - "language": "python", - "_dd.peer.service.source": "peer.service", - "_dd.base_service": "integration-tests-python" + "language": "python" }, "metrics": { "process_id": XXXX, "_dd.top_level": 1, - "_sampling_priority_v1": 1 + "_sampling_priority_v1": -1 }, "type": "web" }, @@ -397,11 +397,11 @@ HTTP GET https://www.datadoghq.com/ Headers: ["Accept-Encoding:gzip, deflate", " "_inferred_span.synchronicity": "async", "_inferred_span.tag_source": "self", "peer.service": "integration-tests-python", + "_dd.peer.service.source": "peer.service", + "_dd.base_service": "integration-tests-python", "_dd.p.dm": "-0", "_dd.p.tid": "XXXX", - "language": "python", - "_dd.peer.service.source": "peer.service", - "_dd.base_service": "integration-tests-python" + "language": "python" }, "metrics": { "process_id": XXXX, @@ -566,16 +566,16 @@ HTTP GET https://www.datadoghq.com/ Headers: ["Accept-Encoding:gzip, deflate", " "_inferred_span.tag_source": "self", "http.status_code": "200", "peer.service": "integration-tests-python", + "_dd.peer.service.source": "peer.service", + "_dd.base_service": "integration-tests-python", "_dd.p.dm": "-0", "_dd.p.tid": "XXXX", - "language": "python", - "_dd.peer.service.source": "peer.service", - "_dd.base_service": "integration-tests-python" + "language": "python" }, "metrics": { "process_id": XXXX, "_dd.top_level": 1, - "_sampling_priority_v1": 1 + "_sampling_priority_v1": -1 }, "type": "http" }, @@ -736,16 +736,16 @@ HTTP GET https://www.datadoghq.com/ Headers: ["Accept-Encoding:gzip, deflate", " "_inferred_span.synchronicity": "async", "_inferred_span.tag_source": "self", "peer.service": "integration-tests-python", + "_dd.peer.service.source": "peer.service", + "_dd.base_service": "integration-tests-python", "_dd.p.dm": "-0", "_dd.p.tid": "XXXX", - "language": "python", - "_dd.peer.service.source": "peer.service", - "_dd.base_service": 
"integration-tests-python" + "language": "python" }, "metrics": { "process_id": XXXX, "_dd.top_level": 1, - "_sampling_priority_v1": 1 + "_sampling_priority_v1": -1 }, "type": "web" }, @@ -899,16 +899,16 @@ HTTP GET https://www.datadoghq.com/ Headers: ["Accept-Encoding:gzip, deflate", " "_inferred_span.synchronicity": "async", "_inferred_span.tag_source": "self", "peer.service": "integration-tests-python", + "_dd.peer.service.source": "peer.service", + "_dd.base_service": "integration-tests-python", "_dd.p.dm": "-0", "_dd.p.tid": "XXXX", - "language": "python", - "_dd.peer.service.source": "peer.service", - "_dd.base_service": "integration-tests-python" + "language": "python" }, "metrics": { "process_id": XXXX, "_dd.top_level": 1, - "_sampling_priority_v1": 1 + "_sampling_priority_v1": -1 }, "type": "web" }, @@ -1037,6 +1037,7 @@ START } HTTP GET https://datadoghq.com/ Headers: ["Accept-Encoding:gzip, deflate", "Accept:*/*", "Connection:keep-alive", "User-Agent:python-requests/X.X.X", "traceparent:XXX", "tracestate:XXX HTTP GET https://www.datadoghq.com/ Headers: ["Accept-Encoding:gzip, deflate", "Accept:*/*", "Connection:keep-alive", "User-Agent:python-requests/X.X.X", "traceparent:XXX", "tracestate:XXX +END Duration: XXXX ms Memory Used: XXXX MB { "traces": [ [ @@ -1063,16 +1064,16 @@ HTTP GET https://www.datadoghq.com/ Headers: ["Accept-Encoding:gzip, deflate", " "_inferred_span.synchronicity": "async", "_inferred_span.tag_source": "self", "peer.service": "integration-tests-python", + "_dd.peer.service.source": "peer.service", + "_dd.base_service": "integration-tests-python", "_dd.p.dm": "-0", "_dd.p.tid": "XXXX", - "language": "python", - "_dd.peer.service.source": "peer.service", - "_dd.base_service": "integration-tests-python" + "language": "python" }, "metrics": { "process_id": XXXX, "_dd.top_level": 1, - "_sampling_priority_v1": 1 + "_sampling_priority_v1": -1 }, "type": "web" }, @@ -1161,7 +1162,6 @@ HTTP GET https://www.datadoghq.com/ Headers: ["Accept-Encoding:gzip, deflate", " ] ] } -END Duration: XXXX ms Memory Used: XXXX MB START { "m": "aws.lambda.enhanced.invocations", @@ -1226,16 +1226,16 @@ HTTP GET https://www.datadoghq.com/ Headers: ["Accept-Encoding:gzip, deflate", " "_inferred_span.synchronicity": "async", "_inferred_span.tag_source": "self", "peer.service": "integration-tests-python", + "_dd.peer.service.source": "peer.service", + "_dd.base_service": "integration-tests-python", "_dd.p.dm": "-0", "_dd.p.tid": "XXXX", - "language": "python", - "_dd.peer.service.source": "peer.service", - "_dd.base_service": "integration-tests-python" + "language": "python" }, "metrics": { "process_id": XXXX, "_dd.top_level": 1, - "_sampling_priority_v1": 1 + "_sampling_priority_v1": -1 }, "type": "web" }, @@ -1395,16 +1395,16 @@ HTTP GET https://www.datadoghq.com/ Headers: ["Accept-Encoding:gzip, deflate", " "_inferred_span.tag_source": "self", "http.status_code": "200", "peer.service": "integration-tests-python", + "_dd.peer.service.source": "peer.service", + "_dd.base_service": "integration-tests-python", "_dd.p.dm": "-0", "_dd.p.tid": "XXXX", - "language": "python", - "_dd.peer.service.source": "peer.service", - "_dd.base_service": "integration-tests-python" + "language": "python" }, "metrics": { "process_id": XXXX, "_dd.top_level": 1, - "_sampling_priority_v1": 1 + "_sampling_priority_v1": -1 }, "type": "web" }, diff --git a/tests/integration/snapshots/logs/sync-metrics_python310.log b/tests/integration/snapshots/logs/sync-metrics_python310.log index e97b0cb2..97a0cf4d 100644 --- 
a/tests/integration/snapshots/logs/sync-metrics_python310.log +++ b/tests/integration/snapshots/logs/sync-metrics_python310.log @@ -47,11 +47,11 @@ HTTP GET https://www.datadoghq.com/ Headers: ["Accept-Encoding:gzip, deflate", " "_inferred_span.tag_source": "self", "http.status_code": "200", "peer.service": "integration-tests-python", + "_dd.peer.service.source": "peer.service", + "_dd.base_service": "integration-tests-python", "_dd.p.dm": "-0", "_dd.p.tid": "XXXX", - "language": "python", - "_dd.peer.service.source": "peer.service", - "_dd.base_service": "integration-tests-python" + "language": "python" }, "metrics": { "process_id": XXXX, @@ -236,16 +236,16 @@ HTTP GET https://www.datadoghq.com/ Headers: ["Accept-Encoding:gzip, deflate", " "_inferred_span.synchronicity": "async", "_inferred_span.tag_source": "self", "peer.service": "integration-tests-python", + "_dd.peer.service.source": "peer.service", + "_dd.base_service": "integration-tests-python", "_dd.p.dm": "-0", "_dd.p.tid": "XXXX", - "language": "python", - "_dd.peer.service.source": "peer.service", - "_dd.base_service": "integration-tests-python" + "language": "python" }, "metrics": { "process_id": XXXX, "_dd.top_level": 1, - "_sampling_priority_v1": 1 + "_sampling_priority_v1": -1 }, "type": "web" }, @@ -415,16 +415,16 @@ HTTP GET https://www.datadoghq.com/ Headers: ["Accept-Encoding:gzip, deflate", " "_inferred_span.synchronicity": "async", "_inferred_span.tag_source": "self", "peer.service": "integration-tests-python", + "_dd.peer.service.source": "peer.service", + "_dd.base_service": "integration-tests-python", "_dd.p.dm": "-0", "_dd.p.tid": "XXXX", - "language": "python", - "_dd.peer.service.source": "peer.service", - "_dd.base_service": "integration-tests-python" + "language": "python" }, "metrics": { "process_id": XXXX, "_dd.top_level": 1, - "_sampling_priority_v1": 1 + "_sampling_priority_v1": -1 }, "type": "web" }, @@ -603,16 +603,16 @@ HTTP GET https://www.datadoghq.com/ Headers: ["Accept-Encoding:gzip, deflate", " "_inferred_span.tag_source": "self", "http.status_code": "200", "peer.service": "integration-tests-python", + "_dd.peer.service.source": "peer.service", + "_dd.base_service": "integration-tests-python", "_dd.p.dm": "-0", "_dd.p.tid": "XXXX", - "language": "python", - "_dd.peer.service.source": "peer.service", - "_dd.base_service": "integration-tests-python" + "language": "python" }, "metrics": { "process_id": XXXX, "_dd.top_level": 1, - "_sampling_priority_v1": 1 + "_sampling_priority_v1": -1 }, "type": "http" }, @@ -792,16 +792,16 @@ HTTP GET https://www.datadoghq.com/ Headers: ["Accept-Encoding:gzip, deflate", " "_inferred_span.synchronicity": "async", "_inferred_span.tag_source": "self", "peer.service": "integration-tests-python", + "_dd.peer.service.source": "peer.service", + "_dd.base_service": "integration-tests-python", "_dd.p.dm": "-0", "_dd.p.tid": "XXXX", - "language": "python", - "_dd.peer.service.source": "peer.service", - "_dd.base_service": "integration-tests-python" + "language": "python" }, "metrics": { "process_id": XXXX, "_dd.top_level": 1, - "_sampling_priority_v1": 1 + "_sampling_priority_v1": -1 }, "type": "web" }, @@ -974,16 +974,16 @@ HTTP GET https://www.datadoghq.com/ Headers: ["Accept-Encoding:gzip, deflate", " "_inferred_span.synchronicity": "async", "_inferred_span.tag_source": "self", "peer.service": "integration-tests-python", + "_dd.peer.service.source": "peer.service", + "_dd.base_service": "integration-tests-python", "_dd.p.dm": "-0", "_dd.p.tid": "XXXX", - "language": 
"python", - "_dd.peer.service.source": "peer.service", - "_dd.base_service": "integration-tests-python" + "language": "python" }, "metrics": { "process_id": XXXX, "_dd.top_level": 1, - "_sampling_priority_v1": 1 + "_sampling_priority_v1": -1 }, "type": "web" }, @@ -1157,16 +1157,16 @@ HTTP GET https://www.datadoghq.com/ Headers: ["Accept-Encoding:gzip, deflate", " "_inferred_span.synchronicity": "async", "_inferred_span.tag_source": "self", "peer.service": "integration-tests-python", + "_dd.peer.service.source": "peer.service", + "_dd.base_service": "integration-tests-python", "_dd.p.dm": "-0", "_dd.p.tid": "XXXX", - "language": "python", - "_dd.peer.service.source": "peer.service", - "_dd.base_service": "integration-tests-python" + "language": "python" }, "metrics": { "process_id": XXXX, "_dd.top_level": 1, - "_sampling_priority_v1": 1 + "_sampling_priority_v1": -1 }, "type": "web" }, @@ -1339,16 +1339,16 @@ HTTP GET https://www.datadoghq.com/ Headers: ["Accept-Encoding:gzip, deflate", " "_inferred_span.synchronicity": "async", "_inferred_span.tag_source": "self", "peer.service": "integration-tests-python", + "_dd.peer.service.source": "peer.service", + "_dd.base_service": "integration-tests-python", "_dd.p.dm": "-0", "_dd.p.tid": "XXXX", - "language": "python", - "_dd.peer.service.source": "peer.service", - "_dd.base_service": "integration-tests-python" + "language": "python" }, "metrics": { "process_id": XXXX, "_dd.top_level": 1, - "_sampling_priority_v1": 1 + "_sampling_priority_v1": -1 }, "type": "web" }, @@ -1527,16 +1527,16 @@ HTTP GET https://www.datadoghq.com/ Headers: ["Accept-Encoding:gzip, deflate", " "_inferred_span.tag_source": "self", "http.status_code": "200", "peer.service": "integration-tests-python", + "_dd.peer.service.source": "peer.service", + "_dd.base_service": "integration-tests-python", "_dd.p.dm": "-0", "_dd.p.tid": "XXXX", - "language": "python", - "_dd.peer.service.source": "peer.service", - "_dd.base_service": "integration-tests-python" + "language": "python" }, "metrics": { "process_id": XXXX, "_dd.top_level": 1, - "_sampling_priority_v1": 1 + "_sampling_priority_v1": -1 }, "type": "web" }, diff --git a/tests/integration/snapshots/logs/sync-metrics_python311.log b/tests/integration/snapshots/logs/sync-metrics_python311.log index 84161ca2..bfb38df9 100644 --- a/tests/integration/snapshots/logs/sync-metrics_python311.log +++ b/tests/integration/snapshots/logs/sync-metrics_python311.log @@ -47,11 +47,11 @@ HTTP GET https://www.datadoghq.com/ Headers: ["Accept-Encoding:gzip, deflate", " "_inferred_span.tag_source": "self", "http.status_code": "200", "peer.service": "integration-tests-python", + "_dd.peer.service.source": "peer.service", + "_dd.base_service": "integration-tests-python", "_dd.p.dm": "-0", "_dd.p.tid": "XXXX", - "language": "python", - "_dd.peer.service.source": "peer.service", - "_dd.base_service": "integration-tests-python" + "language": "python" }, "metrics": { "process_id": XXXX, @@ -236,16 +236,16 @@ HTTP GET https://www.datadoghq.com/ Headers: ["Accept-Encoding:gzip, deflate", " "_inferred_span.synchronicity": "async", "_inferred_span.tag_source": "self", "peer.service": "integration-tests-python", + "_dd.peer.service.source": "peer.service", + "_dd.base_service": "integration-tests-python", "_dd.p.dm": "-0", "_dd.p.tid": "XXXX", - "language": "python", - "_dd.peer.service.source": "peer.service", - "_dd.base_service": "integration-tests-python" + "language": "python" }, "metrics": { "process_id": XXXX, "_dd.top_level": 1, - 
"_sampling_priority_v1": 1 + "_sampling_priority_v1": -1 }, "type": "web" }, @@ -415,16 +415,16 @@ HTTP GET https://www.datadoghq.com/ Headers: ["Accept-Encoding:gzip, deflate", " "_inferred_span.synchronicity": "async", "_inferred_span.tag_source": "self", "peer.service": "integration-tests-python", + "_dd.peer.service.source": "peer.service", + "_dd.base_service": "integration-tests-python", "_dd.p.dm": "-0", "_dd.p.tid": "XXXX", - "language": "python", - "_dd.peer.service.source": "peer.service", - "_dd.base_service": "integration-tests-python" + "language": "python" }, "metrics": { "process_id": XXXX, "_dd.top_level": 1, - "_sampling_priority_v1": 1 + "_sampling_priority_v1": -1 }, "type": "web" }, @@ -603,16 +603,16 @@ HTTP GET https://www.datadoghq.com/ Headers: ["Accept-Encoding:gzip, deflate", " "_inferred_span.tag_source": "self", "http.status_code": "200", "peer.service": "integration-tests-python", + "_dd.peer.service.source": "peer.service", + "_dd.base_service": "integration-tests-python", "_dd.p.dm": "-0", "_dd.p.tid": "XXXX", - "language": "python", - "_dd.peer.service.source": "peer.service", - "_dd.base_service": "integration-tests-python" + "language": "python" }, "metrics": { "process_id": XXXX, "_dd.top_level": 1, - "_sampling_priority_v1": 1 + "_sampling_priority_v1": -1 }, "type": "http" }, @@ -792,16 +792,16 @@ HTTP GET https://www.datadoghq.com/ Headers: ["Accept-Encoding:gzip, deflate", " "_inferred_span.synchronicity": "async", "_inferred_span.tag_source": "self", "peer.service": "integration-tests-python", + "_dd.peer.service.source": "peer.service", + "_dd.base_service": "integration-tests-python", "_dd.p.dm": "-0", "_dd.p.tid": "XXXX", - "language": "python", - "_dd.peer.service.source": "peer.service", - "_dd.base_service": "integration-tests-python" + "language": "python" }, "metrics": { "process_id": XXXX, "_dd.top_level": 1, - "_sampling_priority_v1": 1 + "_sampling_priority_v1": -1 }, "type": "web" }, @@ -974,16 +974,16 @@ HTTP GET https://www.datadoghq.com/ Headers: ["Accept-Encoding:gzip, deflate", " "_inferred_span.synchronicity": "async", "_inferred_span.tag_source": "self", "peer.service": "integration-tests-python", + "_dd.peer.service.source": "peer.service", + "_dd.base_service": "integration-tests-python", "_dd.p.dm": "-0", "_dd.p.tid": "XXXX", - "language": "python", - "_dd.peer.service.source": "peer.service", - "_dd.base_service": "integration-tests-python" + "language": "python" }, "metrics": { "process_id": XXXX, "_dd.top_level": 1, - "_sampling_priority_v1": 1 + "_sampling_priority_v1": -1 }, "type": "web" }, @@ -1073,6 +1073,7 @@ HTTP GET https://www.datadoghq.com/ Headers: ["Accept-Encoding:gzip, deflate", " ] } HTTP POST https://api.datadoghq.com/api/v1/distribution_points Headers: ["Accept-Encoding:gzip, deflate", "Accept:*/*", "Connection:keep-alive", "Content-Encoding:deflate", "Content-Length:XXXX", "Content-Type:application/json", "DD-API-KEY:XXXX", "User-Agent:datadogpy/XX (python XX; os linux; arch XXXX)", "traceparent:XXX", "tracestate:XXX +END Duration: XXXX ms Memory Used: XXXX MB { "traces": [ [ @@ -1111,7 +1112,6 @@ HTTP POST https://api.datadoghq.com/api/v1/distribution_points Headers: ["Accept ] ] } -END Duration: XXXX ms Memory Used: XXXX MB START { "m": "aws.lambda.enhanced.invocations", @@ -1157,16 +1157,16 @@ HTTP GET https://www.datadoghq.com/ Headers: ["Accept-Encoding:gzip, deflate", " "_inferred_span.synchronicity": "async", "_inferred_span.tag_source": "self", "peer.service": "integration-tests-python", + 
"_dd.peer.service.source": "peer.service", + "_dd.base_service": "integration-tests-python", "_dd.p.dm": "-0", "_dd.p.tid": "XXXX", - "language": "python", - "_dd.peer.service.source": "peer.service", - "_dd.base_service": "integration-tests-python" + "language": "python" }, "metrics": { "process_id": XXXX, "_dd.top_level": 1, - "_sampling_priority_v1": 1 + "_sampling_priority_v1": -1 }, "type": "web" }, @@ -1339,16 +1339,16 @@ HTTP GET https://www.datadoghq.com/ Headers: ["Accept-Encoding:gzip, deflate", " "_inferred_span.synchronicity": "async", "_inferred_span.tag_source": "self", "peer.service": "integration-tests-python", + "_dd.peer.service.source": "peer.service", + "_dd.base_service": "integration-tests-python", "_dd.p.dm": "-0", "_dd.p.tid": "XXXX", - "language": "python", - "_dd.peer.service.source": "peer.service", - "_dd.base_service": "integration-tests-python" + "language": "python" }, "metrics": { "process_id": XXXX, "_dd.top_level": 1, - "_sampling_priority_v1": 1 + "_sampling_priority_v1": -1 }, "type": "web" }, @@ -1527,16 +1527,16 @@ HTTP GET https://www.datadoghq.com/ Headers: ["Accept-Encoding:gzip, deflate", " "_inferred_span.tag_source": "self", "http.status_code": "200", "peer.service": "integration-tests-python", + "_dd.peer.service.source": "peer.service", + "_dd.base_service": "integration-tests-python", "_dd.p.dm": "-0", "_dd.p.tid": "XXXX", - "language": "python", - "_dd.peer.service.source": "peer.service", - "_dd.base_service": "integration-tests-python" + "language": "python" }, "metrics": { "process_id": XXXX, "_dd.top_level": 1, - "_sampling_priority_v1": 1 + "_sampling_priority_v1": -1 }, "type": "web" }, diff --git a/tests/integration/snapshots/logs/sync-metrics_python312.log b/tests/integration/snapshots/logs/sync-metrics_python312.log index 8b4c74cf..e9e3e339 100644 --- a/tests/integration/snapshots/logs/sync-metrics_python312.log +++ b/tests/integration/snapshots/logs/sync-metrics_python312.log @@ -47,11 +47,11 @@ HTTP GET https://www.datadoghq.com/ Headers: ["Accept-Encoding:gzip, deflate", " "_inferred_span.tag_source": "self", "http.status_code": "200", "peer.service": "integration-tests-python", + "_dd.peer.service.source": "peer.service", + "_dd.base_service": "integration-tests-python", "_dd.p.dm": "-0", "_dd.p.tid": "XXXX", - "language": "python", - "_dd.peer.service.source": "peer.service", - "_dd.base_service": "integration-tests-python" + "language": "python" }, "metrics": { "process_id": XXXX, @@ -236,16 +236,16 @@ HTTP GET https://www.datadoghq.com/ Headers: ["Accept-Encoding:gzip, deflate", " "_inferred_span.synchronicity": "async", "_inferred_span.tag_source": "self", "peer.service": "integration-tests-python", + "_dd.peer.service.source": "peer.service", + "_dd.base_service": "integration-tests-python", "_dd.p.dm": "-0", "_dd.p.tid": "XXXX", - "language": "python", - "_dd.peer.service.source": "peer.service", - "_dd.base_service": "integration-tests-python" + "language": "python" }, "metrics": { "process_id": XXXX, "_dd.top_level": 1, - "_sampling_priority_v1": 1 + "_sampling_priority_v1": -1 }, "type": "web" }, @@ -415,16 +415,16 @@ HTTP GET https://www.datadoghq.com/ Headers: ["Accept-Encoding:gzip, deflate", " "_inferred_span.synchronicity": "async", "_inferred_span.tag_source": "self", "peer.service": "integration-tests-python", + "_dd.peer.service.source": "peer.service", + "_dd.base_service": "integration-tests-python", "_dd.p.dm": "-0", "_dd.p.tid": "XXXX", - "language": "python", - "_dd.peer.service.source": "peer.service", - 
"_dd.base_service": "integration-tests-python" + "language": "python" }, "metrics": { "process_id": XXXX, "_dd.top_level": 1, - "_sampling_priority_v1": 1 + "_sampling_priority_v1": -1 }, "type": "web" }, @@ -603,16 +603,16 @@ HTTP GET https://www.datadoghq.com/ Headers: ["Accept-Encoding:gzip, deflate", " "_inferred_span.tag_source": "self", "http.status_code": "200", "peer.service": "integration-tests-python", + "_dd.peer.service.source": "peer.service", + "_dd.base_service": "integration-tests-python", "_dd.p.dm": "-0", "_dd.p.tid": "XXXX", - "language": "python", - "_dd.peer.service.source": "peer.service", - "_dd.base_service": "integration-tests-python" + "language": "python" }, "metrics": { "process_id": XXXX, "_dd.top_level": 1, - "_sampling_priority_v1": 1 + "_sampling_priority_v1": -1 }, "type": "http" }, @@ -792,16 +792,16 @@ HTTP GET https://www.datadoghq.com/ Headers: ["Accept-Encoding:gzip, deflate", " "_inferred_span.synchronicity": "async", "_inferred_span.tag_source": "self", "peer.service": "integration-tests-python", + "_dd.peer.service.source": "peer.service", + "_dd.base_service": "integration-tests-python", "_dd.p.dm": "-0", "_dd.p.tid": "XXXX", - "language": "python", - "_dd.peer.service.source": "peer.service", - "_dd.base_service": "integration-tests-python" + "language": "python" }, "metrics": { "process_id": XXXX, "_dd.top_level": 1, - "_sampling_priority_v1": 1 + "_sampling_priority_v1": -1 }, "type": "web" }, @@ -974,16 +974,16 @@ HTTP GET https://www.datadoghq.com/ Headers: ["Accept-Encoding:gzip, deflate", " "_inferred_span.synchronicity": "async", "_inferred_span.tag_source": "self", "peer.service": "integration-tests-python", + "_dd.peer.service.source": "peer.service", + "_dd.base_service": "integration-tests-python", "_dd.p.dm": "-0", "_dd.p.tid": "XXXX", - "language": "python", - "_dd.peer.service.source": "peer.service", - "_dd.base_service": "integration-tests-python" + "language": "python" }, "metrics": { "process_id": XXXX, "_dd.top_level": 1, - "_sampling_priority_v1": 1 + "_sampling_priority_v1": -1 }, "type": "web" }, @@ -1157,16 +1157,16 @@ HTTP GET https://www.datadoghq.com/ Headers: ["Accept-Encoding:gzip, deflate", " "_inferred_span.synchronicity": "async", "_inferred_span.tag_source": "self", "peer.service": "integration-tests-python", + "_dd.peer.service.source": "peer.service", + "_dd.base_service": "integration-tests-python", "_dd.p.dm": "-0", "_dd.p.tid": "XXXX", - "language": "python", - "_dd.peer.service.source": "peer.service", - "_dd.base_service": "integration-tests-python" + "language": "python" }, "metrics": { "process_id": XXXX, "_dd.top_level": 1, - "_sampling_priority_v1": 1 + "_sampling_priority_v1": -1 }, "type": "web" }, @@ -1339,16 +1339,16 @@ HTTP GET https://www.datadoghq.com/ Headers: ["Accept-Encoding:gzip, deflate", " "_inferred_span.synchronicity": "async", "_inferred_span.tag_source": "self", "peer.service": "integration-tests-python", + "_dd.peer.service.source": "peer.service", + "_dd.base_service": "integration-tests-python", "_dd.p.dm": "-0", "_dd.p.tid": "XXXX", - "language": "python", - "_dd.peer.service.source": "peer.service", - "_dd.base_service": "integration-tests-python" + "language": "python" }, "metrics": { "process_id": XXXX, "_dd.top_level": 1, - "_sampling_priority_v1": 1 + "_sampling_priority_v1": -1 }, "type": "web" }, @@ -1527,16 +1527,16 @@ HTTP GET https://www.datadoghq.com/ Headers: ["Accept-Encoding:gzip, deflate", " "_inferred_span.tag_source": "self", "http.status_code": "200", 
"peer.service": "integration-tests-python", + "_dd.peer.service.source": "peer.service", + "_dd.base_service": "integration-tests-python", "_dd.p.dm": "-0", "_dd.p.tid": "XXXX", - "language": "python", - "_dd.peer.service.source": "peer.service", - "_dd.base_service": "integration-tests-python" + "language": "python" }, "metrics": { "process_id": XXXX, "_dd.top_level": 1, - "_sampling_priority_v1": 1 + "_sampling_priority_v1": -1 }, "type": "web" }, diff --git a/tests/integration/snapshots/logs/sync-metrics_python38.log b/tests/integration/snapshots/logs/sync-metrics_python38.log index 4fe60af6..e9e6eea6 100644 --- a/tests/integration/snapshots/logs/sync-metrics_python38.log +++ b/tests/integration/snapshots/logs/sync-metrics_python38.log @@ -47,11 +47,11 @@ HTTP GET https://www.datadoghq.com/ Headers: ["Accept-Encoding:gzip, deflate", " "_inferred_span.tag_source": "self", "http.status_code": "200", "peer.service": "integration-tests-python", + "_dd.peer.service.source": "peer.service", + "_dd.base_service": "integration-tests-python", "_dd.p.dm": "-0", "_dd.p.tid": "XXXX", - "language": "python", - "_dd.peer.service.source": "peer.service", - "_dd.base_service": "integration-tests-python" + "language": "python" }, "metrics": { "process_id": XXXX, @@ -236,16 +236,16 @@ HTTP GET https://www.datadoghq.com/ Headers: ["Accept-Encoding:gzip, deflate", " "_inferred_span.synchronicity": "async", "_inferred_span.tag_source": "self", "peer.service": "integration-tests-python", + "_dd.peer.service.source": "peer.service", + "_dd.base_service": "integration-tests-python", "_dd.p.dm": "-0", "_dd.p.tid": "XXXX", - "language": "python", - "_dd.peer.service.source": "peer.service", - "_dd.base_service": "integration-tests-python" + "language": "python" }, "metrics": { "process_id": XXXX, "_dd.top_level": 1, - "_sampling_priority_v1": 1 + "_sampling_priority_v1": -1 }, "type": "web" }, @@ -415,16 +415,16 @@ HTTP GET https://www.datadoghq.com/ Headers: ["Accept-Encoding:gzip, deflate", " "_inferred_span.synchronicity": "async", "_inferred_span.tag_source": "self", "peer.service": "integration-tests-python", + "_dd.peer.service.source": "peer.service", + "_dd.base_service": "integration-tests-python", "_dd.p.dm": "-0", "_dd.p.tid": "XXXX", - "language": "python", - "_dd.peer.service.source": "peer.service", - "_dd.base_service": "integration-tests-python" + "language": "python" }, "metrics": { "process_id": XXXX, "_dd.top_level": 1, - "_sampling_priority_v1": 1 + "_sampling_priority_v1": -1 }, "type": "web" }, @@ -603,16 +603,16 @@ HTTP GET https://www.datadoghq.com/ Headers: ["Accept-Encoding:gzip, deflate", " "_inferred_span.tag_source": "self", "http.status_code": "200", "peer.service": "integration-tests-python", + "_dd.peer.service.source": "peer.service", + "_dd.base_service": "integration-tests-python", "_dd.p.dm": "-0", "_dd.p.tid": "XXXX", - "language": "python", - "_dd.peer.service.source": "peer.service", - "_dd.base_service": "integration-tests-python" + "language": "python" }, "metrics": { "process_id": XXXX, "_dd.top_level": 1, - "_sampling_priority_v1": 1 + "_sampling_priority_v1": -1 }, "type": "http" }, @@ -792,16 +792,16 @@ HTTP GET https://www.datadoghq.com/ Headers: ["Accept-Encoding:gzip, deflate", " "_inferred_span.synchronicity": "async", "_inferred_span.tag_source": "self", "peer.service": "integration-tests-python", + "_dd.peer.service.source": "peer.service", + "_dd.base_service": "integration-tests-python", "_dd.p.dm": "-0", "_dd.p.tid": "XXXX", - "language": "python", - 
"_dd.peer.service.source": "peer.service", - "_dd.base_service": "integration-tests-python" + "language": "python" }, "metrics": { "process_id": XXXX, "_dd.top_level": 1, - "_sampling_priority_v1": 1 + "_sampling_priority_v1": -1 }, "type": "web" }, @@ -974,16 +974,16 @@ HTTP GET https://www.datadoghq.com/ Headers: ["Accept-Encoding:gzip, deflate", " "_inferred_span.synchronicity": "async", "_inferred_span.tag_source": "self", "peer.service": "integration-tests-python", + "_dd.peer.service.source": "peer.service", + "_dd.base_service": "integration-tests-python", "_dd.p.dm": "-0", "_dd.p.tid": "XXXX", - "language": "python", - "_dd.peer.service.source": "peer.service", - "_dd.base_service": "integration-tests-python" + "language": "python" }, "metrics": { "process_id": XXXX, "_dd.top_level": 1, - "_sampling_priority_v1": 1 + "_sampling_priority_v1": -1 }, "type": "web" }, @@ -1157,16 +1157,16 @@ HTTP GET https://www.datadoghq.com/ Headers: ["Accept-Encoding:gzip, deflate", " "_inferred_span.synchronicity": "async", "_inferred_span.tag_source": "self", "peer.service": "integration-tests-python", + "_dd.peer.service.source": "peer.service", + "_dd.base_service": "integration-tests-python", "_dd.p.dm": "-0", "_dd.p.tid": "XXXX", - "language": "python", - "_dd.peer.service.source": "peer.service", - "_dd.base_service": "integration-tests-python" + "language": "python" }, "metrics": { "process_id": XXXX, "_dd.top_level": 1, - "_sampling_priority_v1": 1 + "_sampling_priority_v1": -1 }, "type": "web" }, @@ -1339,16 +1339,16 @@ HTTP GET https://www.datadoghq.com/ Headers: ["Accept-Encoding:gzip, deflate", " "_inferred_span.synchronicity": "async", "_inferred_span.tag_source": "self", "peer.service": "integration-tests-python", + "_dd.peer.service.source": "peer.service", + "_dd.base_service": "integration-tests-python", "_dd.p.dm": "-0", "_dd.p.tid": "XXXX", - "language": "python", - "_dd.peer.service.source": "peer.service", - "_dd.base_service": "integration-tests-python" + "language": "python" }, "metrics": { "process_id": XXXX, "_dd.top_level": 1, - "_sampling_priority_v1": 1 + "_sampling_priority_v1": -1 }, "type": "web" }, @@ -1527,16 +1527,16 @@ HTTP GET https://www.datadoghq.com/ Headers: ["Accept-Encoding:gzip, deflate", " "_inferred_span.tag_source": "self", "http.status_code": "200", "peer.service": "integration-tests-python", + "_dd.peer.service.source": "peer.service", + "_dd.base_service": "integration-tests-python", "_dd.p.dm": "-0", "_dd.p.tid": "XXXX", - "language": "python", - "_dd.peer.service.source": "peer.service", - "_dd.base_service": "integration-tests-python" + "language": "python" }, "metrics": { "process_id": XXXX, "_dd.top_level": 1, - "_sampling_priority_v1": 1 + "_sampling_priority_v1": -1 }, "type": "web" }, diff --git a/tests/integration/snapshots/logs/sync-metrics_python39.log b/tests/integration/snapshots/logs/sync-metrics_python39.log index 98d74ed6..ebc39ab5 100644 --- a/tests/integration/snapshots/logs/sync-metrics_python39.log +++ b/tests/integration/snapshots/logs/sync-metrics_python39.log @@ -47,11 +47,11 @@ HTTP GET https://www.datadoghq.com/ Headers: ["Accept-Encoding:gzip, deflate", " "_inferred_span.tag_source": "self", "http.status_code": "200", "peer.service": "integration-tests-python", + "_dd.peer.service.source": "peer.service", + "_dd.base_service": "integration-tests-python", "_dd.p.dm": "-0", "_dd.p.tid": "XXXX", - "language": "python", - "_dd.peer.service.source": "peer.service", - "_dd.base_service": "integration-tests-python" + "language": 
"python" }, "metrics": { "process_id": XXXX, @@ -236,16 +236,16 @@ HTTP GET https://www.datadoghq.com/ Headers: ["Accept-Encoding:gzip, deflate", " "_inferred_span.synchronicity": "async", "_inferred_span.tag_source": "self", "peer.service": "integration-tests-python", + "_dd.peer.service.source": "peer.service", + "_dd.base_service": "integration-tests-python", "_dd.p.dm": "-0", "_dd.p.tid": "XXXX", - "language": "python", - "_dd.peer.service.source": "peer.service", - "_dd.base_service": "integration-tests-python" + "language": "python" }, "metrics": { "process_id": XXXX, "_dd.top_level": 1, - "_sampling_priority_v1": 1 + "_sampling_priority_v1": -1 }, "type": "web" }, @@ -415,16 +415,16 @@ HTTP GET https://www.datadoghq.com/ Headers: ["Accept-Encoding:gzip, deflate", " "_inferred_span.synchronicity": "async", "_inferred_span.tag_source": "self", "peer.service": "integration-tests-python", + "_dd.peer.service.source": "peer.service", + "_dd.base_service": "integration-tests-python", "_dd.p.dm": "-0", "_dd.p.tid": "XXXX", - "language": "python", - "_dd.peer.service.source": "peer.service", - "_dd.base_service": "integration-tests-python" + "language": "python" }, "metrics": { "process_id": XXXX, "_dd.top_level": 1, - "_sampling_priority_v1": 1 + "_sampling_priority_v1": -1 }, "type": "web" }, @@ -603,16 +603,16 @@ HTTP GET https://www.datadoghq.com/ Headers: ["Accept-Encoding:gzip, deflate", " "_inferred_span.tag_source": "self", "http.status_code": "200", "peer.service": "integration-tests-python", + "_dd.peer.service.source": "peer.service", + "_dd.base_service": "integration-tests-python", "_dd.p.dm": "-0", "_dd.p.tid": "XXXX", - "language": "python", - "_dd.peer.service.source": "peer.service", - "_dd.base_service": "integration-tests-python" + "language": "python" }, "metrics": { "process_id": XXXX, "_dd.top_level": 1, - "_sampling_priority_v1": 1 + "_sampling_priority_v1": -1 }, "type": "http" }, @@ -792,16 +792,16 @@ HTTP GET https://www.datadoghq.com/ Headers: ["Accept-Encoding:gzip, deflate", " "_inferred_span.synchronicity": "async", "_inferred_span.tag_source": "self", "peer.service": "integration-tests-python", + "_dd.peer.service.source": "peer.service", + "_dd.base_service": "integration-tests-python", "_dd.p.dm": "-0", "_dd.p.tid": "XXXX", - "language": "python", - "_dd.peer.service.source": "peer.service", - "_dd.base_service": "integration-tests-python" + "language": "python" }, "metrics": { "process_id": XXXX, "_dd.top_level": 1, - "_sampling_priority_v1": 1 + "_sampling_priority_v1": -1 }, "type": "web" }, @@ -974,16 +974,16 @@ HTTP GET https://www.datadoghq.com/ Headers: ["Accept-Encoding:gzip, deflate", " "_inferred_span.synchronicity": "async", "_inferred_span.tag_source": "self", "peer.service": "integration-tests-python", + "_dd.peer.service.source": "peer.service", + "_dd.base_service": "integration-tests-python", "_dd.p.dm": "-0", "_dd.p.tid": "XXXX", - "language": "python", - "_dd.peer.service.source": "peer.service", - "_dd.base_service": "integration-tests-python" + "language": "python" }, "metrics": { "process_id": XXXX, "_dd.top_level": 1, - "_sampling_priority_v1": 1 + "_sampling_priority_v1": -1 }, "type": "web" }, @@ -1157,16 +1157,16 @@ HTTP GET https://www.datadoghq.com/ Headers: ["Accept-Encoding:gzip, deflate", " "_inferred_span.synchronicity": "async", "_inferred_span.tag_source": "self", "peer.service": "integration-tests-python", + "_dd.peer.service.source": "peer.service", + "_dd.base_service": "integration-tests-python", "_dd.p.dm": "-0", 
"_dd.p.tid": "XXXX", - "language": "python", - "_dd.peer.service.source": "peer.service", - "_dd.base_service": "integration-tests-python" + "language": "python" }, "metrics": { "process_id": XXXX, "_dd.top_level": 1, - "_sampling_priority_v1": 1 + "_sampling_priority_v1": -1 }, "type": "web" }, @@ -1339,16 +1339,16 @@ HTTP GET https://www.datadoghq.com/ Headers: ["Accept-Encoding:gzip, deflate", " "_inferred_span.synchronicity": "async", "_inferred_span.tag_source": "self", "peer.service": "integration-tests-python", + "_dd.peer.service.source": "peer.service", + "_dd.base_service": "integration-tests-python", "_dd.p.dm": "-0", "_dd.p.tid": "XXXX", - "language": "python", - "_dd.peer.service.source": "peer.service", - "_dd.base_service": "integration-tests-python" + "language": "python" }, "metrics": { "process_id": XXXX, "_dd.top_level": 1, - "_sampling_priority_v1": 1 + "_sampling_priority_v1": -1 }, "type": "web" }, @@ -1527,16 +1527,16 @@ HTTP GET https://www.datadoghq.com/ Headers: ["Accept-Encoding:gzip, deflate", " "_inferred_span.tag_source": "self", "http.status_code": "200", "peer.service": "integration-tests-python", + "_dd.peer.service.source": "peer.service", + "_dd.base_service": "integration-tests-python", "_dd.p.dm": "-0", "_dd.p.tid": "XXXX", - "language": "python", - "_dd.peer.service.source": "peer.service", - "_dd.base_service": "integration-tests-python" + "language": "python" }, "metrics": { "process_id": XXXX, "_dd.top_level": 1, - "_sampling_priority_v1": 1 + "_sampling_priority_v1": -1 }, "type": "web" }, diff --git a/tests/test_benchmarks.py b/tests/test_benchmarks.py new file mode 100644 index 00000000..899e3d7f --- /dev/null +++ b/tests/test_benchmarks.py @@ -0,0 +1,97 @@ +import builtins +import json +import os +import pytest + +import ddtrace + +from datadog_lambda import metric +from datadog_lambda import tag_object +from datadog_lambda import tracing +from datadog_lambda import trigger +from datadog_lambda import xray + +from datadog_lambda.constants import XrayDaemon, XraySubsegment + +from tests.utils import get_mock_context, reset_xray_connection + + +event_samples_dir = "tests/event_samples" +event_samples = [f[:-5] for f in os.listdir(event_samples_dir) if f.endswith(".json")] + + +def test_metric_write_metric_point_to_stdout(benchmark, monkeypatch): + monkeypatch.setattr(builtins, "print", lambda *a, **k: None) + benchmark( + metric.write_metric_point_to_stdout, + "metric_name", + 1, + tags=[ + "tag1:value1", + "tag2:value2", + "tag3:value3", + ], + ) + + +@pytest.mark.parametrize("event", event_samples) +def test_tag_object_tag_object(event, benchmark): + with open(f"{event_samples_dir}/{event}.json") as f: + event = json.load(f) + span = ddtrace.tracer.start_span("test") + benchmark(tag_object.tag_object, span, "function.request", event) + + +@pytest.mark.parametrize("event", event_samples) +def test_tracing_create_inferred_span(event, benchmark): + with open(f"{event_samples_dir}/{event}.json") as f: + event = json.load(f) + context = get_mock_context() + benchmark(tracing.create_inferred_span, event, context) + + +@pytest.mark.parametrize("event", event_samples) +def test_tracing_extract_dd_trace_context(event, benchmark): + with open(f"{event_samples_dir}/{event}.json") as f: + event = json.load(f) + context = get_mock_context() + benchmark(tracing.extract_dd_trace_context, event, context) + + +@pytest.mark.parametrize("event", event_samples) +def test_trigger_parse_event_source(event, benchmark): + with open(f"{event_samples_dir}/{event}.json") 
as f: + event = json.load(f) + benchmark(trigger.parse_event_source, event) + + +@pytest.mark.parametrize("event", event_samples) +def test_trigger_extract_trigger_tags(event, benchmark): + with open(f"{event_samples_dir}/{event}.json") as f: + event = json.load(f) + context = get_mock_context() + benchmark(trigger.extract_trigger_tags, event, context) + + +def test_xray_send_segment(benchmark, monkeypatch): + reset_xray_connection() + + monkeypatch.setenv(XrayDaemon.XRAY_DAEMON_ADDRESS, "localhost:9000") + monkeypatch.setenv( + XrayDaemon.XRAY_TRACE_ID_HEADER_NAME, + "Root=1-5e272390-8c398be037738dc042009320;Parent=94ae789b969f1cc5;Sampled=1;Lineage=c6c5b1b9:0", + ) + + def socket_send(*a, **k): + sends.append(True) + + sends = [] + monkeypatch.setattr("socket.socket.send", socket_send) + + key = { + "trace-id": "12345678901234567890123456789012", + "parent-id": "1234567890123456", + "sampling-priority": "1", + } + benchmark(xray.send_segment, XraySubsegment.TRACE_KEY, key) + assert sends diff --git a/tests/test_metric.py b/tests/test_metric.py index 24c9a56d..992084b9 100644 --- a/tests/test_metric.py +++ b/tests/test_metric.py @@ -10,7 +10,7 @@ from datadog_lambda.metric import lambda_metric from datadog_lambda.api import decrypt_kms_api_key, KMS_ENCRYPTION_CONTEXT_KEY from datadog_lambda.thread_stats_writer import ThreadStatsWriter -from datadog_lambda.tags import _format_dd_lambda_layer_tag +from datadog_lambda.tags import dd_lambda_layer_tag class TestLambdaMetric(unittest.TestCase): @@ -23,12 +23,13 @@ def test_lambda_metric_tagged_with_dd_lambda_layer(self): lambda_metric("test", 1) lambda_metric("test", 1, 123, []) lambda_metric("test", 1, tags=["tag1:test"]) - expected_tag = _format_dd_lambda_layer_tag() self.mock_metric_lambda_stats.distribution.assert_has_calls( [ - call("test", 1, timestamp=None, tags=[expected_tag]), - call("test", 1, timestamp=123, tags=[expected_tag]), - call("test", 1, timestamp=None, tags=["tag1:test", expected_tag]), + call("test", 1, timestamp=None, tags=[dd_lambda_layer_tag]), + call("test", 1, timestamp=123, tags=[dd_lambda_layer_tag]), + call( + "test", 1, timestamp=None, tags=["tag1:test", dd_lambda_layer_tag] + ), ] ) @@ -37,9 +38,8 @@ def test_lambda_metric_tagged_with_dd_lambda_layer(self): def test_lambda_metric_flush_to_log_with_extension(self): os.environ["DD_FLUSH_TO_LOG"] = "True" lambda_metric("test", 1) - expected_tag = _format_dd_lambda_layer_tag() self.mock_metric_lambda_stats.distribution.assert_has_calls( - [call("test", 1, timestamp=None, tags=[expected_tag])] + [call("test", 1, timestamp=None, tags=[dd_lambda_layer_tag])] ) del os.environ["DD_FLUSH_TO_LOG"] diff --git a/tests/test_module_name.py b/tests/test_module_name.py index 21396465..a6faf829 100644 --- a/tests/test_module_name.py +++ b/tests/test_module_name.py @@ -1,7 +1,5 @@ import unittest -from unittest.mock import patch, call, MagicMock - from datadog_lambda.module_name import modify_module_name diff --git a/tests/test_tags.py b/tests/test_tags.py index 66c0c39f..07daa8e0 100644 --- a/tests/test_tags.py +++ b/tests/test_tags.py @@ -1,30 +1,25 @@ import unittest -from unittest.mock import patch, MagicMock +from unittest.mock import patch +from datadog_lambda.tags import parse_lambda_tags_from_arn -from datadog_lambda.tags import parse_lambda_tags_from_arn, get_runtime_tag - - -def get_mock_context( - invoked_function_arn="arn:aws:lambda:us-east-1:1234597598159:function:swf-hello-test:$Latest", - function_version="1", -): - lambda_context = MagicMock() - 
lambda_context.invoked_function_arn = invoked_function_arn - lambda_context.function_version = function_version - return lambda_context +from tests.utils import get_mock_context class TestMetricTags(unittest.TestCase): def setUp(self): - patcher = patch("datadog_lambda.tags.python_version_tuple") + patcher = patch("sys.version_info", (3, 12, 0)) self.mock_python_version_tuple = patcher.start() self.addCleanup(patcher.stop) def test_parse_lambda_tags_from_arn_latest(self): + lambda_context = get_mock_context() + lambda_context.invoked_function_arn = ( + "arn:aws:lambda:us-east-1:1234597598159:function:swf-hello-test:$Latest" + ) self.assertListEqual( - parse_lambda_tags_from_arn(get_mock_context()), + parse_lambda_tags_from_arn(lambda_context), [ "region:us-east-1", "account_id:1234597598159", @@ -63,7 +58,3 @@ def test_parse_lambda_tags_from_arn_alias(self): "resource:swf-hello-test:my_alias-1", ], ) - - def test_get_runtime_tag(self): - self.mock_python_version_tuple.return_value = ("3", "12", "0") - self.assertEqual(get_runtime_tag(), "runtime:python3.12") diff --git a/tests/test_tracing.py b/tests/test_tracing.py index 0aa38613..296bd0dc 100644 --- a/tests/test_tracing.py +++ b/tests/test_tracing.py @@ -5,7 +5,7 @@ import os import unittest -from unittest.mock import MagicMock, Mock, patch, call +from unittest.mock import Mock, patch, call import ddtrace @@ -39,6 +39,9 @@ ) from datadog_lambda.trigger import EventTypes +from tests.utils import get_mock_context + + function_arn = "arn:aws:lambda:us-west-1:123457598159:function:python-layer-test" fake_xray_header_value = ( @@ -50,29 +53,6 @@ event_samples = "tests/event_samples/" -class ClientContext(object): - def __init__(self, custom=None): - self.custom = custom - - -def get_mock_context( - aws_request_id="request-id-1", - memory_limit_in_mb="256", - invoked_function_arn=function_arn, - function_version="1", - function_name="Function", - custom=None, -): - lambda_context = MagicMock() - lambda_context.aws_request_id = aws_request_id - lambda_context.memory_limit_in_mb = memory_limit_in_mb - lambda_context.invoked_function_arn = invoked_function_arn - lambda_context.function_version = function_version - lambda_context.function_name = function_name - lambda_context.client_context = ClientContext(custom) - return lambda_context - - def with_trace_propagation_style(style): style_list = list(style.split(",")) @@ -96,6 +76,167 @@ def _wrap(*args, **kwargs): return _wrapper +_test_extract_dd_trace_context = ( + ("api-gateway", Context(trace_id=12345, span_id=67890, sampling_priority=2)), + ( + "api-gateway-no-apiid", + Context(trace_id=12345, span_id=67890, sampling_priority=2), + ), + ( + "api-gateway-non-proxy", + Context(trace_id=12345, span_id=67890, sampling_priority=2), + ), + ( + "api-gateway-non-proxy-async", + Context(trace_id=12345, span_id=67890, sampling_priority=2), + ), + ( + "api-gateway-websocket-connect", + Context(trace_id=12345, span_id=67890, sampling_priority=2), + ), + ( + "api-gateway-websocket-default", + Context(trace_id=12345, span_id=67890, sampling_priority=2), + ), + ( + "api-gateway-websocket-disconnect", + Context(trace_id=12345, span_id=67890, sampling_priority=2), + ), + ( + "authorizer-request-api-gateway-v1", + Context( + trace_id=13478705995797221209, + span_id=8471288263384216896, + sampling_priority=1, + ), + ), + ("authorizer-request-api-gateway-v1-cached", None), + ( + "authorizer-request-api-gateway-v2", + Context( + trace_id=14356983619852933354, + span_id=12658621083505413809, + sampling_priority=1, 
+ ), + ), + ("authorizer-request-api-gateway-v2-cached", None), + ( + "authorizer-request-api-gateway-websocket-connect", + Context( + trace_id=5351047404834723189, + span_id=18230460631156161837, + sampling_priority=1, + ), + ), + ("authorizer-request-api-gateway-websocket-message", None), + ( + "authorizer-token-api-gateway-v1", + Context( + trace_id=17874798268144902712, + span_id=16184667399315372101, + sampling_priority=1, + ), + ), + ("authorizer-token-api-gateway-v1-cached", None), + ("cloudfront", None), + ("cloudwatch-events", None), + ("cloudwatch-logs", None), + ("custom", None), + ("dynamodb", None), + ("eventbridge-custom", Context(trace_id=12345, span_id=67890, sampling_priority=2)), + ( + "eventbridge-sqs", + Context( + trace_id=7379586022458917877, + span_id=2644033662113726488, + sampling_priority=1, + ), + ), + ("http-api", Context(trace_id=12345, span_id=67890, sampling_priority=2)), + ( + "kinesis", + Context( + trace_id=4948377316357291421, + span_id=2876253380018681026, + sampling_priority=1, + ), + ), + ( + "kinesis-batch", + Context( + trace_id=4948377316357291421, + span_id=2876253380018681026, + sampling_priority=1, + ), + ), + ("lambda-url", None), + ("s3", None), + ( + "sns-b64-msg-attribute", + Context( + trace_id=4948377316357291421, + span_id=6746998015037429512, + sampling_priority=1, + ), + ), + ( + "sns-batch", + Context( + trace_id=4948377316357291421, + span_id=6746998015037429512, + sampling_priority=1, + ), + ), + ( + "sns-string-msg-attribute", + Context( + trace_id=4948377316357291421, + span_id=6746998015037429512, + sampling_priority=1, + ), + ), + ( + "sqs-batch", + Context( + trace_id=2684756524522091840, + span_id=7431398482019833808, + sampling_priority=1, + ), + ), + ( + "sqs-java-upstream", + Context( + trace_id=7925498337868555493, + span_id=5245570649555658903, + sampling_priority=1, + ), + ), + ( + "sqs-string-msg-attribute", + Context( + trace_id=2684756524522091840, + span_id=7431398482019833808, + sampling_priority=1, + ), + ), + ({"headers": None}, None), +) + + +@pytest.mark.parametrize("event,expect", _test_extract_dd_trace_context) +def test_extract_dd_trace_context(event, expect): + if isinstance(event, str): + with open(f"{event_samples}{event}.json") as f: + event = json.load(f) + ctx = get_mock_context() + + actual, _, _ = extract_dd_trace_context(event, ctx) + assert (expect is None) is (actual is None) + assert (expect is None) or actual.trace_id == expect.trace_id + assert (expect is None) or actual.span_id == expect.span_id + assert (expect is None) or actual.sampling_priority == expect.sampling_priority + + class TestExtractAndGetDDTraceContext(unittest.TestCase): def setUp(self): global dd_tracing_enabled @@ -1217,7 +1358,7 @@ def __init__(self, service, start, span_type, parent_name=None, tags=None): "http.url": "70ixmpl4fl.execute-api.us-east-2.amazonaws.com/path/to/resource", "operation_name": "aws.apigateway.rest", "request_id": "123", - "resource_names": "POST /path/to/resource", + "resource_names": "POST /{proxy+}", "stage": "prod", }, ), @@ -1291,6 +1432,50 @@ def __init__(self, service, start, span_type, parent_name=None, tags=None): }, ), ), + ( + "api-gateway-v1-parametrized", + _Span( + service="mcwkra0ya4.execute-api.sa-east-1.amazonaws.com", + start=1710529824.52, + span_type="http", + tags={ + "_dd.origin": "lambda", + "_inferred_span.synchronicity": "sync", + "_inferred_span.tag_source": "self", + "apiid": "mcwkra0ya4", + "apiname": "mcwkra0ya4", + "endpoint": "/user/42", + "http.method": "GET", + 
"http.url": "mcwkra0ya4.execute-api.sa-east-1.amazonaws.com/user/42", + "operation_name": "aws.apigateway.rest", + "request_id": "123", + "resource_names": "GET /user/{id}", + "stage": "dev", + }, + ), + ), + ( + "api-gateway-v2-parametrized", + _Span( + service="9vj54we5ih.execute-api.sa-east-1.amazonaws.com", + start=1710529905.066, + span_type="http", + tags={ + "_dd.origin": "lambda", + "_inferred_span.synchronicity": "sync", + "_inferred_span.tag_source": "self", + "apiid": "9vj54we5ih", + "apiname": "9vj54we5ih", + "endpoint": "/user/42", + "http.method": "GET", + "http.url": "9vj54we5ih.execute-api.sa-east-1.amazonaws.com/user/42", + "operation_name": "aws.httpapi", + "request_id": "123", + "resource_names": "GET /user/{id}", + "stage": "$default", + }, + ), + ), ( "api-gateway-websocket-default", _Span( @@ -1555,7 +1740,7 @@ def __init__(self, service, start, span_type, parent_name=None, tags=None): "http.url": "70ixmpl4fl.execute-api.us-east-2.amazonaws.com/path/to/resource", "operation_name": "aws.apigateway.rest", "request_id": "123", - "resource_names": "POST /path/to/resource", + "resource_names": "POST /{proxy+}", "stage": "prod", }, ), @@ -1773,127 +1958,6 @@ def test_create_inferred_span(mock_span_finish, source, expect): class TestInferredSpans(unittest.TestCase): - def test_extract_context_from_eventbridge_event(self): - event_sample_source = "eventbridge-custom" - test_file = event_samples + event_sample_source + ".json" - with open(test_file, "r") as event: - event = json.load(event) - ctx = get_mock_context() - context, source, event_type = extract_dd_trace_context(event, ctx) - self.assertEqual(context.trace_id, 12345) - self.assertEqual(context.span_id, 67890), - self.assertEqual(context.sampling_priority, 2) - - def test_extract_dd_trace_context_for_eventbridge(self): - event_sample_source = "eventbridge-custom" - test_file = event_samples + event_sample_source + ".json" - with open(test_file, "r") as event: - event = json.load(event) - ctx = get_mock_context() - context, source, event_type = extract_dd_trace_context(event, ctx) - self.assertEqual(context.trace_id, 12345) - self.assertEqual(context.span_id, 67890) - - def test_extract_context_from_eventbridge_sqs_event(self): - event_sample_source = "eventbridge-sqs" - test_file = event_samples + event_sample_source + ".json" - with open(test_file, "r") as event: - event = json.load(event) - - ctx = get_mock_context() - context, source, event_type = extract_dd_trace_context(event, ctx) - self.assertEqual(context.trace_id, 7379586022458917877) - self.assertEqual(context.span_id, 2644033662113726488) - self.assertEqual(context.sampling_priority, 1) - - def test_extract_context_from_sqs_event_with_string_msg_attr(self): - event_sample_source = "sqs-string-msg-attribute" - test_file = event_samples + event_sample_source + ".json" - with open(test_file, "r") as event: - event = json.load(event) - ctx = get_mock_context() - context, source, event_type = extract_dd_trace_context(event, ctx) - self.assertEqual(context.trace_id, 2684756524522091840) - self.assertEqual(context.span_id, 7431398482019833808) - self.assertEqual(context.sampling_priority, 1) - - def test_extract_context_from_sqs_batch_event(self): - event_sample_source = "sqs-batch" - test_file = event_samples + event_sample_source + ".json" - with open(test_file, "r") as event: - event = json.load(event) - ctx = get_mock_context() - context, source, event_source = extract_dd_trace_context(event, ctx) - self.assertEqual(context.trace_id, 2684756524522091840) - 
self.assertEqual(context.span_id, 7431398482019833808) - self.assertEqual(context.sampling_priority, 1) - - def test_extract_context_from_sqs_java_upstream_event(self): - event_sample_source = "sqs-java-upstream" - test_file = event_samples + event_sample_source + ".json" - with open(test_file, "r") as event: - event = json.load(event) - ctx = get_mock_context() - context, source, event_type = extract_dd_trace_context(event, ctx) - self.assertEqual(context.trace_id, 7925498337868555493) - self.assertEqual(context.span_id, 5245570649555658903) - self.assertEqual(context.sampling_priority, 1) - - def test_extract_context_from_sns_event_with_string_msg_attr(self): - event_sample_source = "sns-string-msg-attribute" - test_file = event_samples + event_sample_source + ".json" - with open(test_file, "r") as event: - event = json.load(event) - ctx = get_mock_context() - context, source, event_source = extract_dd_trace_context(event, ctx) - self.assertEqual(context.trace_id, 4948377316357291421) - self.assertEqual(context.span_id, 6746998015037429512) - self.assertEqual(context.sampling_priority, 1) - - def test_extract_context_from_sns_event_with_b64_msg_attr(self): - event_sample_source = "sns-b64-msg-attribute" - test_file = event_samples + event_sample_source + ".json" - with open(test_file, "r") as event: - event = json.load(event) - ctx = get_mock_context() - context, source, event_source = extract_dd_trace_context(event, ctx) - self.assertEqual(context.trace_id, 4948377316357291421) - self.assertEqual(context.span_id, 6746998015037429512) - self.assertEqual(context.sampling_priority, 1) - - def test_extract_context_from_sns_batch_event(self): - event_sample_source = "sns-batch" - test_file = event_samples + event_sample_source + ".json" - with open(test_file, "r") as event: - event = json.load(event) - ctx = get_mock_context() - context, source, event_source = extract_dd_trace_context(event, ctx) - self.assertEqual(context.trace_id, 4948377316357291421) - self.assertEqual(context.span_id, 6746998015037429512) - self.assertEqual(context.sampling_priority, 1) - - def test_extract_context_from_kinesis_event(self): - event_sample_source = "kinesis" - test_file = event_samples + event_sample_source + ".json" - with open(test_file, "r") as event: - event = json.load(event) - ctx = get_mock_context() - context, source, event_source = extract_dd_trace_context(event, ctx) - self.assertEqual(context.trace_id, 4948377316357291421) - self.assertEqual(context.span_id, 2876253380018681026) - self.assertEqual(context.sampling_priority, 1) - - def test_extract_context_from_kinesis_batch_event(self): - event_sample_source = "kinesis-batch" - test_file = event_samples + event_sample_source + ".json" - with open(test_file, "r") as event: - event = json.load(event) - ctx = get_mock_context() - context, source, event_source = extract_dd_trace_context(event, ctx) - self.assertEqual(context.trace_id, 4948377316357291421) - self.assertEqual(context.span_id, 2876253380018681026) - self.assertEqual(context.sampling_priority, 1) - @patch("datadog_lambda.tracing.submit_errors_metric") def test_mark_trace_as_error_for_5xx_responses_getting_400_response_code( self, mock_submit_errors_metric @@ -1915,14 +1979,6 @@ def test_mark_trace_as_error_for_5xx_responses_sends_error_metric_and_set_error_ mock_submit_errors_metric.assert_called_once() self.assertEqual(1, mock_span.error) - def test_no_error_with_nonetype_headers(self): - lambda_ctx = get_mock_context() - ctx, source, event_type = extract_dd_trace_context( - 
{"headers": None}, - lambda_ctx, - ) - self.assertEqual(ctx, None) - class TestStepFunctionsTraceContext(unittest.TestCase): def test_deterministic_m5_hash(self): diff --git a/tests/test_trigger.py b/tests/test_trigger.py index 2f514811..59178b1e 100644 --- a/tests/test_trigger.py +++ b/tests/test_trigger.py @@ -11,16 +11,12 @@ extract_http_status_code_tag, ) +from tests.utils import get_mock_context + event_samples = "tests/event_samples/" function_arn = "arn:aws:lambda:us-west-1:123457598159:function:python-layer-test" -def get_mock_context(invoked_function_arn=function_arn): - lambda_context = MagicMock() - lambda_context.invoked_function_arn = invoked_function_arn - return lambda_context - - class TestGetEventSourceAndARN(unittest.TestCase): def test_event_source_api_gateway(self): event_sample_source = "api-gateway" diff --git a/tests/test_version.py b/tests/test_version.py new file mode 100644 index 00000000..ca3c8f03 --- /dev/null +++ b/tests/test_version.py @@ -0,0 +1,7 @@ +import importlib.metadata +from datadog_lambda import __version__ + + +def test_version(): + # test version in __init__ matches version in pyproject.toml + assert importlib.metadata.version("datadog-lambda") == __version__ diff --git a/tests/test_wrapper.py b/tests/test_wrapper.py index 141e245a..13fef2b6 100644 --- a/tests/test_wrapper.py +++ b/tests/test_wrapper.py @@ -3,30 +3,17 @@ import os import unittest -from unittest.mock import patch, call, ANY, MagicMock +from unittest.mock import patch, call, ANY from datadog_lambda.constants import TraceHeader import datadog_lambda.wrapper as wrapper +import datadog_lambda.xray as xray from datadog_lambda.metric import lambda_metric from datadog_lambda.thread_stats_writer import ThreadStatsWriter from ddtrace import Span, tracer from ddtrace.internal.constants import MAX_UINT_64BITS - -def get_mock_context( - aws_request_id="request-id-1", - memory_limit_in_mb="256", - invoked_function_arn="arn:aws:lambda:us-west-1:123457598159:function:python-layer-test:1", - function_version="1", - client_context={}, -): - lambda_context = MagicMock() - lambda_context.aws_request_id = aws_request_id - lambda_context.memory_limit_in_mb = memory_limit_in_mb - lambda_context.invoked_function_arn = invoked_function_arn - lambda_context.function_version = function_version - lambda_context.client_context = client_context - return lambda_context +from tests.utils import get_mock_context, reset_xray_connection class TestDatadogLambdaWrapper(unittest.TestCase): @@ -61,31 +48,32 @@ def setUp(self): self.mock_patch_all = patcher.start() self.addCleanup(patcher.stop) - patcher = patch("datadog_lambda.cold_start.is_cold_start") - self.mock_is_cold_start = patcher.start() - self.mock_is_cold_start.return_value = True + patcher = patch("datadog_lambda.tags.get_cold_start_tag") + self.mock_get_cold_start_tag = patcher.start() + self.mock_get_cold_start_tag.return_value = "cold_start:true" self.addCleanup(patcher.stop) - patcher = patch("datadog_lambda.tags.python_version_tuple") - self.mock_python_version_tuple = patcher.start() - self.mock_python_version_tuple.return_value = ("3", "9", "10") + patcher = patch("datadog_lambda.tags.runtime_tag", "runtime:python3.9") + self.mock_runtime_tag = patcher.start() self.addCleanup(patcher.stop) patcher = patch("datadog_lambda.metric.write_metric_point_to_stdout") self.mock_write_metric_point_to_stdout = patcher.start() self.addCleanup(patcher.stop) - patcher = patch("datadog_lambda.tags.get_library_version_tag") - self.mock_format_dd_lambda_layer_tag = 
patcher.start() + patcher = patch( + "datadog_lambda.tags.library_version_tag", "datadog_lambda:v6.6.6" + ) # Mock the layer version so we don't have to update tests on every version bump - self.mock_format_dd_lambda_layer_tag.return_value = "datadog_lambda:v6.6.6" + self.mock_library_version_tag = patcher.start() + self.addCleanup(patcher.stop) - patcher = patch("datadog_lambda.tags._format_dd_lambda_layer_tag") - self.mock_format_dd_lambda_layer_tag = patcher.start() - # Mock the layer version so we don't have to update tests on every version bump - self.mock_format_dd_lambda_layer_tag.return_value = ( - "dd_lambda_layer:datadog-python39_X.X.X" + patcher = patch( + "datadog_lambda.metric.dd_lambda_layer_tag", + "dd_lambda_layer:datadog-python39_X.X.X", ) + # Mock the layer version so we don't have to update tests on every version bump + self.mock_dd_lambda_layer_tag = patcher.start() self.addCleanup(patcher.stop) def test_datadog_lambda_wrapper(self): @@ -231,7 +219,7 @@ def lambda_handler(event, context): "region:us-west-1", "account_id:123457598159", "functionname:python-layer-test", - "resource:python-layer-test:1", + "resource:python-layer-test", "cold_start:true", "memorysize:256", "runtime:python3.9", @@ -262,7 +250,7 @@ def lambda_handler(event, context): "region:us-west-1", "account_id:123457598159", "functionname:python-layer-test", - "resource:python-layer-test:1", + "resource:python-layer-test", "cold_start:true", "memorysize:256", "runtime:python3.9", @@ -278,7 +266,7 @@ def lambda_handler(event, context): "region:us-west-1", "account_id:123457598159", "functionname:python-layer-test", - "resource:python-layer-test:1", + "resource:python-layer-test", "cold_start:true", "memorysize:256", "runtime:python3.9", @@ -317,7 +305,7 @@ def lambda_handler(event, context): "region:us-west-1", "account_id:123457598159", "functionname:python-layer-test", - "resource:python-layer-test:1", + "resource:python-layer-test", "cold_start:true", "memorysize:256", "runtime:python3.9", @@ -333,7 +321,7 @@ def lambda_handler(event, context): "region:us-west-1", "account_id:123457598159", "functionname:python-layer-test", - "resource:python-layer-test:1", + "resource:python-layer-test", "cold_start:true", "memorysize:256", "runtime:python3.9", @@ -354,7 +342,7 @@ def lambda_handler(event, context): lambda_handler(lambda_event, get_mock_context()) - self.mock_is_cold_start.return_value = False + self.mock_get_cold_start_tag.return_value = "cold_start:false" lambda_handler( lambda_event, get_mock_context(aws_request_id="second-request-id") @@ -369,7 +357,7 @@ def lambda_handler(event, context): "region:us-west-1", "account_id:123457598159", "functionname:python-layer-test", - "resource:python-layer-test:1", + "resource:python-layer-test", "cold_start:true", "memorysize:256", "runtime:python3.9", @@ -385,7 +373,7 @@ def lambda_handler(event, context): "region:us-west-1", "account_id:123457598159", "functionname:python-layer-test", - "resource:python-layer-test:1", + "resource:python-layer-test", "cold_start:false", "memorysize:256", "runtime:python3.9", @@ -466,7 +454,9 @@ def lambda_handler(event, context): ) def test_no_enhanced_metrics_without_env_var(self): - os.environ["DD_ENHANCED_METRICS"] = "false" + patcher = patch("datadog_lambda.metric.enhanced_metrics_enabled", False) + patcher.start() + self.addCleanup(patcher.stop) @wrapper.datadog_lambda_wrapper def lambda_handler(event, context): @@ -479,8 +469,6 @@ def lambda_handler(event, context): 
self.mock_write_metric_point_to_stdout.assert_not_called() - del os.environ["DD_ENHANCED_METRICS"] - def test_only_one_wrapper_in_use(self): patcher = patch("datadog_lambda.wrapper.submit_invocations_metric") self.mock_submit_invocations_metric = patcher.start() @@ -546,6 +534,7 @@ def lambda_handler(event, context): lambda_context = get_mock_context() test_span = tracer.trace("test_span") trace_ctx = tracer.current_trace_context() + trace_ctx.sampling_priority = 1 test_span.finish() lambda_handler.inferred_span = test_span lambda_handler.make_inferred_span = False @@ -603,7 +592,9 @@ class TestLambdaWrapperWithTraceContext(unittest.TestCase): }, ) def test_event_bridge_sqs_payload(self): - patcher = patch("datadog_lambda.xray.send") + reset_xray_connection() + + patcher = patch("datadog_lambda.xray.sock.send") mock_send = patcher.start() self.addCleanup(patcher.stop) @@ -636,7 +627,7 @@ def handler(event, context): self.assertEqual(result.span_id, aws_lambda_span.span_id) self.assertEqual(result.sampling_priority, 1) mock_send.assert_called_once() - (_, raw_payload), _ = mock_send.call_args + (raw_payload,), _ = mock_send.call_args payload = json.loads(raw_payload[33:]) # strip formatting prefix self.assertEqual(self.xray_root, payload["trace_id"]) self.assertEqual(self.xray_parent, payload["parent_id"]) diff --git a/tests/test_xray.py b/tests/test_xray.py index ac3594a9..7f33f891 100644 --- a/tests/test_xray.py +++ b/tests/test_xray.py @@ -4,15 +4,14 @@ from unittest.mock import MagicMock, patch -from datadog_lambda.xray import ( - get_xray_host_port, - build_segment_payload, - build_segment, - send_segment, -) +from datadog_lambda.xray import build_segment_payload, build_segment, send_segment, sock +from tests.utils import reset_xray_connection class TestXRay(unittest.TestCase): + def setUp(self): + reset_xray_connection() + def tearDown(self): if os.environ.get("_X_AMZN_TRACE_ID"): os.environ.pop("_X_AMZN_TRACE_ID") @@ -21,15 +20,15 @@ def tearDown(self): return super().tearDown() def test_get_xray_host_port_empty_(self): - result = get_xray_host_port("") + result = sock._get_xray_host_port("") self.assertIsNone(result) def test_get_xray_host_port_invalid_value(self): - result = get_xray_host_port("myVar") + result = sock._get_xray_host_port("myVar") self.assertIsNone(result) def test_get_xray_host_port_success(self): - result = get_xray_host_port("mySuperHost:1000") + result = sock._get_xray_host_port("mySuperHost:1000") self.assertEqual("mySuperHost", result[0]) self.assertEqual(1000, result[1]) @@ -40,7 +39,7 @@ def test_send_segment_sampled_out(self): ] = "Root=1-5e272390-8c398be037738dc042009320;Parent=94ae789b969f1cc5;Sampled=0;Lineage=c6c5b1b9:0" with patch( - "datadog_lambda.xray.send", MagicMock(return_value=None) + "datadog_lambda.xray.sock.send", MagicMock(return_value=None) ) as mock_send: # XRay trace won't be sampled according to the trace header. send_segment("my_key", {"data": "value"}) @@ -52,7 +51,7 @@ def test_send_segment_sampled(self): "_X_AMZN_TRACE_ID" ] = "Root=1-5e272390-8c398be037738dc042009320;Parent=94ae789b969f1cc5;Sampled=1;Lineage=c6c5b1b9:0" with patch( - "datadog_lambda.xray.send", MagicMock(return_value=None) + "datadog_lambda.xray.sock.send", MagicMock(return_value=None) ) as mock_send: # X-Ray trace will be sampled according to the trace header. 
send_segment("my_key", {"data": "value"}) diff --git a/tests/utils.py b/tests/utils.py new file mode 100644 index 00000000..0f246e68 --- /dev/null +++ b/tests/utils.py @@ -0,0 +1,36 @@ +from unittest.mock import MagicMock + +function_arn = "arn:aws:lambda:us-west-1:123457598159:function:python-layer-test" + + +class ClientContext(object): + def __init__(self, custom=None): + self.custom = custom + + +def get_mock_context( + aws_request_id="request-id-1", + memory_limit_in_mb="256", + invoked_function_arn=function_arn, + function_version="1", + function_name="Function", + custom=None, +): + lambda_context = MagicMock() + lambda_context.aws_request_id = aws_request_id + lambda_context.memory_limit_in_mb = memory_limit_in_mb + lambda_context.invoked_function_arn = invoked_function_arn + lambda_context.function_version = function_version + lambda_context.function_name = function_name + lambda_context.client_context = ClientContext(custom) + return lambda_context + + +def reset_xray_connection(): + from datadog_lambda.xray import sock + + if hasattr(sock, "_host_port_tuple"): + del sock._host_port_tuple + if sock.sock: + sock.sock.close() + sock.sock = None