diff --git a/CHANGELOG.md b/CHANGELOG.md
index 2adf01e7bd4..d18ef83934f 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -6,6 +6,16 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
## [Unreleased]
+## [1.9.1] - 2020-12-21
+
+### Fixed
+- **Logger**: Bugfix to prevent parent loggers with the same name from being configured more than once
+
+### Added
+- **Docs**: Add clarification to Tracer docs on how the `capture_method` decorator can cause function responses to be read and serialized
+- **Utilities**: Add equality check to ease testing of event source data classes
+- **Package**: Add `py.typed` marker file as initial work towards PEP 561 compliance
+
## [1.9.0] - 2020-12-04
### Added
diff --git a/Makefile b/Makefile
index f9e976576df..4185d5abd63 100644
--- a/Makefile
+++ b/Makefile
@@ -1,3 +1,5 @@
+.PHONY: target dev dev-docs format lint test coverage-html pr build build-docs build-docs-api build-docs-website
+.PHONY: docs-local docs-api-local security-baseline complexity-baseline release-prod release-test release
target:
@$(MAKE) pr
diff --git a/aws_lambda_powertools/logging/logger.py b/aws_lambda_powertools/logging/logger.py
index 8e7adcd0985..74663ec7b4a 100644
--- a/aws_lambda_powertools/logging/logger.py
+++ b/aws_lambda_powertools/logging/logger.py
@@ -148,21 +148,32 @@ def _get_logger(self):
    def _init_logger(self, **kwargs):
        """Configures new logger"""

-        # Skip configuration if it's a child logger to prevent
-        # multiple handlers being attached as well as different sampling mechanisms
-        # and multiple messages from being logged as handlers can be duplicated
-        if not self.child:
-            self._configure_sampling()
-            self._logger.setLevel(self.log_level)
-            self._logger.addHandler(self._handler)
-            self.structure_logs(**kwargs)
-
-            logger.debug("Adding filter in root logger to suppress child logger records to bubble up")
-            for handler in logging.root.handlers:
-                # It'll add a filter to suppress any child logger from self.service
-                # Where service is Order, it'll reject parent logger Order,
-                # and child loggers such as Order.checkout, Order.shared
-                handler.addFilter(SuppressFilter(self.service))
+        # Skip configuration if it's a child logger or a pre-configured logger
+        # to prevent the following:
+        #   a) multiple handlers being attached
+        #   b) different sampling mechanisms
+        #   c) multiple messages being logged, as handlers can be duplicated
+        is_logger_preconfigured = getattr(self._logger, "init", False)
+        if self.child or is_logger_preconfigured:
+            return
+
+        self._configure_sampling()
+        self._logger.setLevel(self.log_level)
+        self._logger.addHandler(self._handler)
+        self.structure_logs(**kwargs)
+
+        logger.debug("Adding filter to root logger to suppress child logger records from bubbling up")
+        for handler in logging.root.handlers:
+            # Add a filter to suppress records from self.service and its children,
+            # e.g. where service is Order, it rejects the parent logger Order
+            # as well as child loggers such as Order.checkout and Order.shared
+            handler.addFilter(SuppressFilter(self.service))
+
+        # As per bug #249, we should not pre-configure an existing logger,
+        # so we set a custom attribute on the Logger we configure here;
+        # std logging returns the same Logger, with our attribute set, when the name is reused
+        logger.debug(f"Marking logger {self.service} as preconfigured")
+        self._logger.init = True

    def _configure_sampling(self):
        """Dynamically set log level based on sampling rate
diff --git a/aws_lambda_powertools/py.typed b/aws_lambda_powertools/py.typed
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/aws_lambda_powertools/utilities/data_classes/common.py b/aws_lambda_powertools/utilities/data_classes/common.py
index 7c092a2b526..94a357b3180 100644
--- a/aws_lambda_powertools/utilities/data_classes/common.py
+++ b/aws_lambda_powertools/utilities/data_classes/common.py
@@ -10,6 +10,12 @@ def __init__(self, data: Dict[str, Any]):
    def __getitem__(self, key: str) -> Any:
        return self._data[key]

+    def __eq__(self, other: Any) -> bool:
+        if not isinstance(other, DictWrapper):
+            return False
+
+        return self._data == other._data
+
    def get(self, key: str) -> Optional[Any]:
        return self._data.get(key)
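
A short sketch of how the new equality check eases test assertions; the `DictWrapper` below is a trimmed stand-in for the class in `common.py`, reduced to the pieces shown in this hunk.

```python
from typing import Any, Dict

class DictWrapper:
    """Trimmed stand-in for the data class base shown above"""

    def __init__(self, data: Dict[str, Any]):
        self._data = data

    def __eq__(self, other: Any) -> bool:
        if not isinstance(other, DictWrapper):
            return False
        return self._data == other._data

event = {"Records": [{"eventSource": "aws:s3"}]}
assert DictWrapper(event) == DictWrapper(event)     # same payload compares equal
assert DictWrapper(event) != DictWrapper({"x": 1})  # different payload does not
assert DictWrapper(event) != event                  # a plain dict never compares equal
```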
diff --git a/docs/content/core/tracer.mdx b/docs/content/core/tracer.mdx
index 4042d51a861..62559ac9fe2 100644
--- a/docs/content/core/tracer.mdx
+++ b/docs/content/core/tracer.mdx
@@ -71,6 +71,7 @@ def handler(event, context):
You can disable Tracer from capturing function responses as tracing metadata with the `capture_response=False`
parameter, available in both the `capture_lambda_handler` and `capture_method` decorators.
+
```python:title=do_not_capture_response_as_metadata.py
# Disables Tracer from capturing response and adding as metadata
# Useful when dealing with sensitive data
@@ -116,6 +117,14 @@ def handler(event, context):
You can trace a synchronous function using the `capture_method` decorator.
+
+When `capture_response` is enabled, the function response will be read and serialized as JSON.
+
+The serialization is performed by the `aws-xray-sdk`, which uses the `jsonpickle` module. This can cause
+unintended consequences if reading the returned value has side effects, for example if the
+decorated function response contains a file-like object or a `StreamingBody` for S3 objects.
+
```python:title=app.py
@tracer.capture_method
def collect_payment(charge_id):
@@ -126,6 +135,14 @@ def collect_payment(charge_id):
@tracer.capture_method(capture_response=False) # highlight-line
def sensitive_information_to_be_processed():
return "sensitive_information"
+
+# If we capture the response, the s3_object["Body"].read() method will be called by the aws-xray-sdk
+# when serializing the object. This causes it to return an empty body the next time it is read.
+@tracer.capture_method(capture_response=False) # highlight-line
+def get_s3_object(bucket_name, object_key):
+    s3 = boto3.client("s3")
+    s3_object = s3.get_object(Bucket=bucket_name, Key=object_key)
+    return s3_object
```
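
The caveat above can be reproduced without S3 or X-Ray at all. A minimal sketch, using `io.BytesIO` in place of a real `StreamingBody`: file-like objects are exhausted by the first read, so any serializer that reads the response leaves nothing behind for the caller.

```python
import io

# stands in for the StreamingBody returned by S3 get_object
body = io.BytesIO(b"object contents")

# first read - e.g. performed by a serializer capturing the response
assert body.read() == b"object contents"

# second read - what the actual caller gets afterwards: the stream is exhausted
assert body.read() == b""
```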
## Asynchronous and generator functions
diff --git a/docs/yarn.lock b/docs/yarn.lock
index b6fdd50b72b..36e6c46c79e 100644
--- a/docs/yarn.lock
+++ b/docs/yarn.lock
@@ -9490,9 +9490,9 @@ inherits@2.0.3:
integrity sha1-Yzwsg+PaQqUC9SRmAiSA9CCCYd4=
ini@^1.3.4, ini@^1.3.5, ini@~1.3.0:
- version "1.3.5"
- resolved "https://registry.yarnpkg.com/ini/-/ini-1.3.5.tgz#eee25f56db1c9ec6085e0c22778083f596abf927"
- integrity sha512-RZY5huIKCMRWDUqZlEi72f/lmXKMvuszcMBduliQ3nnWbx9X/ZBQO7DijMEYS9EhHBb2qacRUMtC7svLwe0lcw==
+ version "1.3.8"
+ resolved "https://registry.yarnpkg.com/ini/-/ini-1.3.8.tgz#a29da425b48806f34767a4efce397269af28432c"
+ integrity sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew==
inline-style-parser@0.1.1:
version "0.1.1"
diff --git a/pyproject.toml b/pyproject.toml
index 735965d8cde..78b5d712406 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,8 +1,9 @@
[tool.poetry]
name = "aws_lambda_powertools"
-version = "1.9.0"
+version = "1.9.1"
description = "Python utilities for AWS Lambda functions including but not limited to tracing, logging and custom metrics"
authors = ["Amazon Web Services"]
+include = ["aws_lambda_powertools/py.typed"]
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
diff --git a/tests/events/s3EventDecodedKey.json b/tests/events/s3EventDecodedKey.json
new file mode 100644
index 00000000000..05f5ab5c4bd
--- /dev/null
+++ b/tests/events/s3EventDecodedKey.json
@@ -0,0 +1,40 @@
+{
+ "Records": [
+ {
+ "eventVersion": "2.0",
+ "eventSource": "aws:s3",
+ "awsRegion": "us-east-1",
+ "eventTime": "1970-01-01T00:00:00.123Z",
+ "eventName": "ObjectCreated:Put",
+ "userIdentity": {
+ "principalId": "EXAMPLE"
+ },
+ "requestParameters": {
+ "sourceIPAddress": "127.0.0.1"
+ },
+ "responseElements": {
+ "x-amz-request-id": "C3D13FE58DE4C810",
+ "x-amz-id-2": "FMyUVURIY8/IgAtTv8xRjskZQpcIZ9KG4V5Wp6S7S/JRWeUWerMUE5JgHvANOjpD"
+ },
+ "s3": {
+ "s3SchemaVersion": "1.0",
+ "configurationId": "testConfigRule",
+ "bucket": {
+ "name": "sourcebucket",
+ "ownerIdentity": {
+ "principalId": "EXAMPLE"
+ },
+ "arn": "arn:aws:s3:::mybucket"
+ },
+ "object": {
+ "key": "Happy%20Face.jpg",
+ "urlDecodedKey": "Happy Face.jpg",
+ "size": 1024,
+ "versionId": "version",
+ "eTag": "d41d8cd98f00b204e9800998ecf8427e",
+ "sequencer": "Happy Sequencer"
+ }
+ }
+ }
+ ]
+}
diff --git a/tests/functional/parser/test_cloudwatch.py b/tests/functional/parser/test_cloudwatch.py
index dd7fd503f39..9a61f339140 100644
--- a/tests/functional/parser/test_cloudwatch.py
+++ b/tests/functional/parser/test_cloudwatch.py
@@ -1,7 +1,7 @@
import base64
import json
import zlib
-from typing import List
+from typing import Any, List
import pytest
@@ -69,6 +69,14 @@ def test_handle_cloudwatch_trigger_event_no_envelope():
handle_cloudwatch_logs_no_envelope(event_dict, LambdaContext())
+def test_handle_invalid_cloudwatch_trigger_event_no_envelope():
+    event_dict: Any = {"awslogs": {"data": "invalid_data"}}
+    with pytest.raises(ValidationError) as context:
+        handle_cloudwatch_logs_no_envelope(event_dict, LambdaContext())
+
+    assert context.value.errors()[0]["msg"] == "unable to decompress data"
+
+
def test_handle_invalid_event_with_envelope():
with pytest.raises(ValidationError):
handle_cloudwatch_logs(event={}, context=LambdaContext())
diff --git a/tests/functional/test_aws_lambda_logging.py b/tests/functional/test_aws_lambda_logging.py
index 79667f907fc..b28a753fc98 100644
--- a/tests/functional/test_aws_lambda_logging.py
+++ b/tests/functional/test_aws_lambda_logging.py
@@ -1,6 +1,8 @@
"""aws_lambda_logging tests."""
import io
import json
+import random
+import string
import pytest
@@ -12,9 +14,15 @@ def stdout():
return io.StringIO()
+@pytest.fixture
+def service_name():
+    # Randomize the service (logger) name so each test configures a fresh logger
+    chars = string.ascii_letters + string.digits
+    return "".join(random.SystemRandom().choice(chars) for _ in range(15))
+
+
@pytest.mark.parametrize("level", ["DEBUG", "WARNING", "ERROR", "INFO", "CRITICAL"])
-def test_setup_with_valid_log_levels(stdout, level):
- logger = Logger(level=level, stream=stdout, request_id="request id!", another="value")
+def test_setup_with_valid_log_levels(stdout, level, service_name):
+ logger = Logger(service=service_name, level=level, stream=stdout, request_id="request id!", another="value")
msg = "This is a test"
log_command = {
"INFO": logger.info,
@@ -37,8 +45,8 @@ def test_setup_with_valid_log_levels(stdout, level):
assert "exception" not in log_dict
-def test_logging_exception_traceback(stdout):
- logger = Logger(level="DEBUG", stream=stdout)
+def test_logging_exception_traceback(stdout, service_name):
+ logger = Logger(service=service_name, level="DEBUG", stream=stdout)
try:
raise ValueError("Boom")
@@ -52,9 +60,9 @@ def test_logging_exception_traceback(stdout):
assert "exception" in log_dict
-def test_setup_with_invalid_log_level(stdout):
+def test_setup_with_invalid_log_level(stdout, service_name):
with pytest.raises(ValueError) as e:
- Logger(level="not a valid log level")
+ Logger(service=service_name, level="not a valid log level")
assert "Unknown level" in e.value.args[0]
@@ -65,8 +73,8 @@ def check_log_dict(log_dict):
assert "message" in log_dict
-def test_with_dict_message(stdout):
- logger = Logger(level="DEBUG", stream=stdout)
+def test_with_dict_message(stdout, service_name):
+ logger = Logger(service=service_name, level="DEBUG", stream=stdout)
msg = {"x": "isx"}
logger.critical(msg)
@@ -76,8 +84,8 @@ def test_with_dict_message(stdout):
assert msg == log_dict["message"]
-def test_with_json_message(stdout):
- logger = Logger(stream=stdout)
+def test_with_json_message(stdout, service_name):
+ logger = Logger(service=service_name, stream=stdout)
msg = {"x": "isx"}
logger.info(json.dumps(msg))
@@ -87,8 +95,8 @@ def test_with_json_message(stdout):
assert msg == log_dict["message"]
-def test_with_unserializable_value_in_message(stdout):
- logger = Logger(level="DEBUG", stream=stdout)
+def test_with_unserializable_value_in_message(stdout, service_name):
+ logger = Logger(service=service_name, level="DEBUG", stream=stdout)
class Unserializable:
pass
@@ -101,12 +109,17 @@ class Unserializable:
assert log_dict["message"]["x"].startswith("<")
-def test_with_unserializable_value_in_message_custom(stdout):
+def test_with_unserializable_value_in_message_custom(stdout, service_name):
class Unserializable:
pass
# GIVEN a custom json_default
-    logger = Logger(level="DEBUG", stream=stdout, json_default=lambda o: f"<non-serializable: {type(o).__name__}>")
+    logger = Logger(
+        service=service_name,
+        level="DEBUG",
+        stream=stdout,
+        json_default=lambda o: f"<non-serializable: {type(o).__name__}>",
+    )
# WHEN we log a message
logger.debug({"x": Unserializable()})
@@ -118,9 +131,9 @@ class Unserializable:
assert "json_default" not in log_dict
-def test_log_dict_key_seq(stdout):
+def test_log_dict_key_seq(stdout, service_name):
# GIVEN the default logger configuration
- logger = Logger(stream=stdout)
+ logger = Logger(service=service_name, stream=stdout)
# WHEN logging a message
logger.info("Message")
@@ -131,9 +144,9 @@ def test_log_dict_key_seq(stdout):
assert ",".join(list(log_dict.keys())[:4]) == "level,location,message,timestamp"
-def test_log_dict_key_custom_seq(stdout):
+def test_log_dict_key_custom_seq(stdout, service_name):
# GIVEN a logger configuration with log_record_order set to ["message"]
- logger = Logger(stream=stdout, log_record_order=["message"])
+ logger = Logger(service=service_name, stream=stdout, log_record_order=["message"])
# WHEN logging a message
logger.info("Message")
@@ -144,9 +157,9 @@ def test_log_dict_key_custom_seq(stdout):
assert list(log_dict.keys())[0] == "message"
-def test_log_custom_formatting(stdout):
+def test_log_custom_formatting(stdout, service_name):
# GIVEN a logger with custom `location` and `datefmt` formats
- logger = Logger(stream=stdout, location="[%(funcName)s] %(module)s", datefmt="fake-datefmt")
+ logger = Logger(service=service_name, stream=stdout, location="[%(funcName)s] %(module)s", datefmt="fake-datefmt")
# WHEN logging a message
logger.info("foo")
@@ -158,7 +171,7 @@ def test_log_custom_formatting(stdout):
assert log_dict["timestamp"] == "fake-datefmt"
-def test_log_dict_key_strip_nones(stdout):
+def test_log_dict_key_strip_nones(stdout, service_name):
# GIVEN a logger configuration where we set `location` and `timestamp` to None
# Note: level, sampling_rate and service can not be suppressed
logger = Logger(stream=stdout, level=None, location=None, timestamp=None, sampling_rate=None, service=None)
@@ -170,14 +183,15 @@ def test_log_dict_key_strip_nones(stdout):
# THEN the keys should only include `level`, `message`, `service`, `sampling_rate`
assert sorted(log_dict.keys()) == ["level", "message", "sampling_rate", "service"]
+ assert log_dict["service"] == "service_undefined"
-def test_log_dict_xray_is_present_when_tracing_is_enabled(stdout, monkeypatch):
+def test_log_dict_xray_is_present_when_tracing_is_enabled(stdout, monkeypatch, service_name):
# GIVEN a logger is initialized within a Lambda function with X-Ray enabled
trace_id = "1-5759e988-bd862e3fe1be46a994272793"
trace_header = f"Root={trace_id};Parent=53995c3f42cd8ad8;Sampled=1"
monkeypatch.setenv(name="_X_AMZN_TRACE_ID", value=trace_header)
- logger = Logger(stream=stdout)
+ logger = Logger(service=service_name, stream=stdout)
# WHEN logging a message
logger.info("foo")
@@ -190,9 +204,9 @@ def test_log_dict_xray_is_present_when_tracing_is_enabled(stdout, monkeypatch):
monkeypatch.delenv(name="_X_AMZN_TRACE_ID")
-def test_log_dict_xray_is_not_present_when_tracing_is_disabled(stdout, monkeypatch):
+def test_log_dict_xray_is_not_present_when_tracing_is_disabled(stdout, monkeypatch, service_name):
# GIVEN a logger is initialized within a Lambda function with X-Ray disabled (default)
- logger = Logger(stream=stdout)
+ logger = Logger(service=service_name, stream=stdout)
# WHEN logging a message
logger.info("foo")
@@ -203,12 +217,12 @@ def test_log_dict_xray_is_not_present_when_tracing_is_disabled(stdout, monkeypat
assert "xray_trace_id" not in log_dict
-def test_log_dict_xray_is_updated_when_tracing_id_changes(stdout, monkeypatch):
+def test_log_dict_xray_is_updated_when_tracing_id_changes(stdout, monkeypatch, service_name):
# GIVEN a logger is initialized within a Lambda function with X-Ray enabled
trace_id = "1-5759e988-bd862e3fe1be46a994272793"
trace_header = f"Root={trace_id};Parent=53995c3f42cd8ad8;Sampled=1"
monkeypatch.setenv(name="_X_AMZN_TRACE_ID", value=trace_header)
- logger = Logger(stream=stdout)
+ logger = Logger(service=service_name, stream=stdout)
# WHEN logging a message
logger.info("foo")
diff --git a/tests/functional/test_lambda_trigger_events.py b/tests/functional/test_lambda_trigger_events.py
index 5ca0ec76f16..d6d225bf530 100644
--- a/tests/functional/test_lambda_trigger_events.py
+++ b/tests/functional/test_lambda_trigger_events.py
@@ -28,7 +28,7 @@
UserMigrationTriggerEvent,
VerifyAuthChallengeResponseTriggerEvent,
)
-from aws_lambda_powertools.utilities.data_classes.common import BaseProxyEvent
+from aws_lambda_powertools.utilities.data_classes.common import BaseProxyEvent, DictWrapper
from aws_lambda_powertools.utilities.data_classes.dynamo_db_stream_event import (
AttributeValue,
DynamoDBRecordEventName,
@@ -43,6 +43,21 @@ def load_event(file_name: str) -> dict:
return json.load(fp)
+def test_dict_wrapper_equals():
+    class DataClassSample(DictWrapper):
+        @property
+        def message(self) -> str:
+            return self.get("message")
+
+    data1 = {"message": "foo1"}
+    data2 = {"message": "foo2"}
+
+    assert DataClassSample(data1) == DataClassSample(data1)
+    assert DataClassSample(data1) != DataClassSample(data2)
+    assert DataClassSample(data1) is not data1
+    assert data1 is not DataClassSample(data1)
+
+
def test_cloud_watch_trigger_event():
event = CloudWatchLogsEvent(load_event("cloudWatchLogEvent.json"))
@@ -413,6 +428,11 @@ def test_s3_key_unquote_plus():
assert event.object_key == tricky_name
+def test_s3_key_url_decoded_key():
+    event = S3Event(load_event("s3EventDecodedKey.json"))
+    assert event.object_key == event.record["s3"]["object"]["urlDecodedKey"]
+
+
def test_s3_glacier_event():
example_event = {
"Records": [
@@ -434,6 +454,14 @@ def test_s3_glacier_event():
assert glacier_event_data.restore_event_data.lifecycle_restore_storage_class == "standard"
+def test_s3_glacier_event_json():
+    event = S3Event(load_event("s3EventGlacier.json"))
+    glacier_event_data = event.record.glacier_event_data
+    assert glacier_event_data is not None
+    assert glacier_event_data.restore_event_data.lifecycle_restoration_expiry_time == "1970-01-01T00:01:00.000Z"
+    assert glacier_event_data.restore_event_data.lifecycle_restore_storage_class == "standard"
+
+
def test_ses_trigger_event():
event = SESEvent(load_event("sesEvent.json"))
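
The new `s3EventDecodedKey.json` fixture suggests `object_key` prefers the service-provided `urlDecodedKey` when present. A hypothetical helper mirroring that resolution logic (the `resolve_object_key` name and the fallback to `unquote_plus` are assumptions, not code from this diff):

```python
from urllib.parse import unquote_plus

def resolve_object_key(s3_object: dict) -> str:
    # prefer the already-decoded key when the event provides one
    if s3_object.get("urlDecodedKey"):
        return s3_object["urlDecodedKey"]
    # otherwise decode the raw key, e.g. "Happy%20Face.jpg" -> "Happy Face.jpg"
    return unquote_plus(s3_object["key"])

assert resolve_object_key({"key": "Happy%20Face.jpg", "urlDecodedKey": "Happy Face.jpg"}) == "Happy Face.jpg"
assert resolve_object_key({"key": "Happy%20Face.jpg"}) == "Happy Face.jpg"
```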
diff --git a/tests/functional/test_logger.py b/tests/functional/test_logger.py
index ab445a45cf8..dced7da35ba 100644
--- a/tests/functional/test_logger.py
+++ b/tests/functional/test_logger.py
@@ -2,6 +2,8 @@
import io
import json
import logging
+import random
+import string
from collections import namedtuple
import pytest
@@ -33,6 +35,12 @@ def lambda_event():
return {"greeting": "hello"}
+@pytest.fixture
+def service_name():
+    # Randomize the service (logger) name so each test configures a fresh logger
+    chars = string.ascii_letters + string.digits
+    return "".join(random.SystemRandom().choice(chars) for _ in range(15))
+
+
def capture_logging_output(stdout):
return json.loads(stdout.getvalue().strip())
@@ -41,8 +49,7 @@ def capture_multiple_logging_statements_output(stdout):
return [json.loads(line.strip()) for line in stdout.getvalue().split("\n") if line]
-def test_setup_service_name(stdout):
- service_name = "payment"
+def test_setup_service_name(stdout, service_name):
# GIVEN Logger is initialized
# WHEN service is explicitly defined
logger = Logger(service=service_name, stream=stdout)
@@ -54,20 +61,7 @@ def test_setup_service_name(stdout):
assert service_name == log["service"]
-def test_setup_no_service_name(stdout):
- # GIVEN Logger is initialized
- # WHEN no service is explicitly defined
- logger = Logger(stream=stdout)
-
- logger.info("Hello")
-
- # THEN service field should be "service_undefined"
- log = capture_logging_output(stdout)
- assert "service_undefined" == log["service"]
-
-
-def test_setup_service_env_var(monkeypatch, stdout):
- service_name = "payment"
+def test_setup_service_env_var(monkeypatch, stdout, service_name):
# GIVEN Logger is initialized
# WHEN service is explicitly defined via POWERTOOLS_SERVICE_NAME env
monkeypatch.setenv("POWERTOOLS_SERVICE_NAME", service_name)
@@ -80,12 +74,12 @@ def test_setup_service_env_var(monkeypatch, stdout):
assert service_name == log["service"]
-def test_setup_sampling_rate_env_var(monkeypatch, stdout):
+def test_setup_sampling_rate_env_var(monkeypatch, stdout, service_name):
# GIVEN Logger is initialized
# WHEN sampling rate is explicitly set to 100% via POWERTOOLS_LOGGER_SAMPLE_RATE env
sampling_rate = "1"
monkeypatch.setenv("POWERTOOLS_LOGGER_SAMPLE_RATE", sampling_rate)
- logger = Logger(stream=stdout)
+ logger = Logger(service=service_name, stream=stdout)
logger.debug("I am being sampled")
# THEN sampling rate should equal the POWERTOOLS_LOGGER_SAMPLE_RATE value
@@ -97,9 +91,9 @@ def test_setup_sampling_rate_env_var(monkeypatch, stdout):
assert "I am being sampled" == log["message"]
-def test_inject_lambda_context(lambda_context, stdout):
+def test_inject_lambda_context(lambda_context, stdout, service_name):
# GIVEN Logger is initialized
- logger = Logger(stream=stdout)
+ logger = Logger(service=service_name, stream=stdout)
# WHEN a lambda function is decorated with logger
@logger.inject_lambda_context
@@ -120,9 +114,9 @@ def handler(event, context):
assert key in log
-def test_inject_lambda_context_log_event_request(lambda_context, stdout, lambda_event):
+def test_inject_lambda_context_log_event_request(lambda_context, stdout, lambda_event, service_name):
# GIVEN Logger is initialized
- logger = Logger(stream=stdout)
+ logger = Logger(service=service_name, stream=stdout)
# WHEN a lambda function is decorated with logger instructed to log event
@logger.inject_lambda_context(log_event=True)
@@ -136,10 +130,12 @@ def handler(event, context):
assert logged_event["message"] == lambda_event
-def test_inject_lambda_context_log_event_request_env_var(monkeypatch, lambda_context, stdout, lambda_event):
+def test_inject_lambda_context_log_event_request_env_var(
+    monkeypatch, lambda_context, stdout, lambda_event, service_name
+):
# GIVEN Logger is initialized
monkeypatch.setenv("POWERTOOLS_LOGGER_LOG_EVENT", "true")
- logger = Logger(stream=stdout)
+ logger = Logger(service=service_name, stream=stdout)
# WHEN a lambda function is decorated with logger instructed to log event
# via POWERTOOLS_LOGGER_LOG_EVENT env
@@ -154,9 +150,11 @@ def handler(event, context):
assert logged_event["message"] == lambda_event
-def test_inject_lambda_context_log_no_request_by_default(monkeypatch, lambda_context, stdout, lambda_event):
+def test_inject_lambda_context_log_no_request_by_default(
+    monkeypatch, lambda_context, stdout, lambda_event, service_name
+):
# GIVEN Logger is initialized
- logger = Logger(stream=stdout)
+ logger = Logger(service=service_name, stream=stdout)
# WHEN a lambda function is decorated with logger
@logger.inject_lambda_context
@@ -170,7 +168,7 @@ def handler(event, context):
assert log["message"] != lambda_event
-def test_inject_lambda_cold_start(lambda_context, stdout):
+def test_inject_lambda_cold_start(lambda_context, stdout, service_name):
# cold_start can be false as it's a global variable in Logger module
# so we reset it to simulate the correct behaviour
# since Lambda will only import our logger lib once per concurrent execution
@@ -179,7 +177,7 @@ def test_inject_lambda_cold_start(lambda_context, stdout):
logger.is_cold_start = True
# GIVEN Logger is initialized
- logger = Logger(stream=stdout)
+ logger = Logger(service=service_name, stream=stdout)
# WHEN a lambda function is decorated with logger, and called twice
@logger.inject_lambda_context
@@ -225,9 +223,9 @@ def test_package_logger_format(capsys):
assert logger.level == logging.DEBUG
-def test_logger_append_duplicated(stdout):
+def test_logger_append_duplicated(stdout, service_name):
# GIVEN Logger is initialized with request_id field
- logger = Logger(stream=stdout, request_id="value")
+ logger = Logger(service=service_name, stream=stdout, request_id="value")
# WHEN `request_id` is appended to the existing structured log
# using a different value
@@ -239,17 +237,17 @@ def test_logger_append_duplicated(stdout):
assert "new_value" == log["request_id"]
-def test_logger_invalid_sampling_rate():
+def test_logger_invalid_sampling_rate(service_name):
# GIVEN Logger is initialized
# WHEN sampling_rate is a non-numeric value
# THEN we should raise InvalidLoggerSamplingRateError
with pytest.raises(InvalidLoggerSamplingRateError):
- Logger(stream=stdout, sampling_rate="TEST")
+ Logger(service=service_name, stream=stdout, sampling_rate="TEST")
-def test_inject_lambda_context_with_structured_log(lambda_context, stdout):
+def test_inject_lambda_context_with_structured_log(lambda_context, stdout, service_name):
# GIVEN Logger is initialized
- logger = Logger(stream=stdout)
+ logger = Logger(service=service_name, stream=stdout)
# WHEN structure_logs has been used to add an additional key upfront
# and a lambda function is decorated with logger.inject_lambda_context
@@ -274,13 +272,13 @@ def handler(event, context):
assert key in log
-def test_logger_children_propagate_changes(stdout):
+def test_logger_children_propagate_changes(stdout, service_name):
# GIVEN Loggers are initialized
# create child logger before parent to mimic
# importing logger from another module/file
# as loggers are created in global scope
- child = Logger(stream=stdout, service="order", child=True)
- parent = Logger(stream=stdout, service="order")
+ child = Logger(stream=stdout, service=service_name, child=True)
+ parent = Logger(stream=stdout, service=service_name)
# WHEN a child Logger adds an additional key
child.structure_logs(append=True, customer_id="value")
@@ -293,14 +291,14 @@ def test_logger_children_propagate_changes(stdout):
parent_log, child_log = capture_multiple_logging_statements_output(stdout)
assert "customer_id" in parent_log
assert "customer_id" in child_log
- assert child.parent.name == "order"
+ assert child.parent.name == service_name
-def test_logger_child_not_set_returns_same_logger():
+def test_logger_child_not_set_returns_same_logger(stdout):
# GIVEN two Loggers are initialized with the same service name
# WHEN child param isn't set
- logger_one = Logger(service="something")
- logger_two = Logger(service="something")
+ logger_one = Logger(service="something", stream=stdout)
+ logger_two = Logger(service="something", stream=stdout)
# THEN we should have two Logger instances
# however inner logger wise should be the same
@@ -308,35 +306,42 @@ def test_logger_child_not_set_returns_same_logger():
assert logger_one._logger is logger_two._logger
assert logger_one.name is logger_two.name
+    # THEN we should also not see any duplicated logs
+    logger_one.info("One - Once")
+    logger_two.info("Two - Once")
+
+    logs = list(capture_multiple_logging_statements_output(stdout))
+    assert len(logs) == 2
+
-def test_logger_level_case_insensitive():
+def test_logger_level_case_insensitive(service_name):
# GIVEN a Logger is initialized
# WHEN log level is set as "info" instead of "INFO"
- logger = Logger(level="info")
+ logger = Logger(service=service_name, level="info")
# THEN we should correctly set log level as INFO
assert logger.level == logging.INFO
-def test_logger_level_not_set():
+def test_logger_level_not_set(service_name):
# GIVEN a Logger is initialized
# WHEN no log level was passed
- logger = Logger()
+ logger = Logger(service=service_name)
# THEN we should default to INFO
assert logger.level == logging.INFO
-def test_logger_level_as_int():
+def test_logger_level_as_int(service_name):
# GIVEN a Logger is initialized
# WHEN log level is int
- logger = Logger(level=logging.INFO)
+ logger = Logger(service=service_name, level=logging.INFO)
# THEN level should be the expected int (20, in this case)
assert logger.level == logging.INFO
-def test_logger_level_env_var_as_int(monkeypatch):
+def test_logger_level_env_var_as_int(monkeypatch, service_name):
# GIVEN Logger is initialized
# WHEN log level is explicitly defined via LOG_LEVEL env as int
# THEN Logger should propagate ValueError
@@ -344,12 +349,12 @@ def test_logger_level_env_var_as_int(monkeypatch):
# and '50' is not a correct log level
monkeypatch.setenv("LOG_LEVEL", 50)
with pytest.raises(ValueError, match="Unknown level: '50'"):
- Logger()
+ Logger(service=service_name)
-def test_logger_record_caller_location(stdout):
+def test_logger_record_caller_location(stdout, service_name):
# GIVEN Logger is initialized
- logger = Logger(stream=stdout)
+ logger = Logger(service=service_name, stream=stdout)
# WHEN log statement is run
logger.info("log")
@@ -361,13 +366,13 @@ def test_logger_record_caller_location(stdout):
assert caller_fn_name in log["location"]
-def test_logger_do_not_log_twice_when_root_logger_is_setup(stdout):
+def test_logger_do_not_log_twice_when_root_logger_is_setup(stdout, service_name):
# GIVEN Lambda configures the root logger with a handler
root_logger = logging.getLogger()
root_logger.addHandler(logging.StreamHandler(stream=stdout))
# WHEN we create a new Logger and child Logger
- logger = Logger(stream=stdout)
+ logger = Logger(service=service_name, stream=stdout)
child_logger = Logger(child=True, stream=stdout)
logger.info("hello")
child_logger.info("hello again")
diff --git a/tests/unit/test_tracing.py b/tests/unit/test_tracing.py
index 3e2492b9e15..c8df34a76a6 100644
--- a/tests/unit/test_tracing.py
+++ b/tests/unit/test_tracing.py
@@ -57,19 +57,19 @@ def reset_tracing_config(mocker):
@pytest.fixture
def in_subsegment_mock():
-    class Async_context_manager(mock.MagicMock):
+    class AsyncContextManager(mock.MagicMock):
        async def __aenter__(self, *args, **kwargs):
            return self.__enter__()

        async def __aexit__(self, *args, **kwargs):
            return self.__exit__(*args, **kwargs)

-    class In_subsegment(NamedTuple):
-        in_subsegment: mock.MagicMock = Async_context_manager()
+    class InSubsegment(NamedTuple):
+        in_subsegment: mock.MagicMock = AsyncContextManager()
        put_annotation: mock.MagicMock = mock.MagicMock()
        put_metadata: mock.MagicMock = mock.MagicMock()

-    in_subsegment = In_subsegment()
+    in_subsegment = InSubsegment()
    in_subsegment.in_subsegment.return_value.__enter__.return_value.put_annotation = in_subsegment.put_annotation
    in_subsegment.in_subsegment.return_value.__enter__.return_value.put_metadata = in_subsegment.put_metadata