diff --git a/.github/scripts/label_related_issue.js b/.github/scripts/label_related_issue.js
new file mode 100644
index 00000000000..1953412ae41
--- /dev/null
+++ b/.github/scripts/label_related_issue.js
@@ -0,0 +1,30 @@
+module.exports = async ({github, context}) => {
+  const prBody = context.payload.pull_request.body; // pull_request events nest the PR body under `pull_request`
+  const prNumber = context.payload.number;
+  const releaseLabel = process.env.RELEASE_LABEL;
+  const maintainersTeam = process.env.MAINTAINERS_TEAM;
+
+  const RELATED_ISSUE_REGEX = /Issue number:.+(\d+)/; // `\d+` captures the whole issue number, not just its last digit
+
+  const matcher = new RegExp(RELATED_ISSUE_REGEX);
+  const isMatch = matcher.exec(prBody);
+  if (isMatch != null) {
+    const relatedIssueNumber = isMatch[1];
+    console.info(`Auto-labeling related issue ${relatedIssueNumber} for release`);
+
+    return await github.rest.issues.addLabels({
+      issue_number: relatedIssueNumber,
+      owner: context.repo.owner,
+      repo: context.repo.repo,
+      labels: [releaseLabel]
+    });
+  } else {
+    const msg = `${maintainersTeam} No related issues found. Please ensure '${releaseLabel}' label is applied before releasing.`;
+    return await github.rest.issues.createComment({
+      owner: context.repo.owner,
+      repo: context.repo.repo,
+      body: msg,
+      issue_number: prNumber,
+    });
+  }
+}
diff --git a/.github/workflows/auto-merge.yml b/.github/workflows/auto-merge.yml
index 14bacb12f69..7ce6ff8ba29 100644
--- a/.github/workflows/auto-merge.yml
+++ b/.github/workflows/auto-merge.yml
@@ -14,7 +14,7 @@ jobs:
     steps:
       - name: Dependabot metadata
         id: metadata
-        uses: dependabot/fetch-metadata@v1.1.1
+        uses: dependabot/fetch-metadata@v1.3.3
         with:
           github-token: "${{ secrets.GITHUB_TOKEN }}"
      - name: Enable auto-merge for mypy-boto3 stubs Dependabot PRs
diff --git a/.github/workflows/export_pr_details.yml b/.github/workflows/export_pr_details.yml
index e41df94aca8..af03150c3d5 100644
--- a/.github/workflows/export_pr_details.yml
+++ b/.github/workflows/export_pr_details.yml
@@ -41,7 +41,7 @@ jobs:
         uses: actions/github-script@v6
         # For security, we only download artifacts tied to the successful PR recording workflow
         with:
-          github-token: ${{ inputs.token }}
+          github-token: ${{ secrets.token }}
           script: |
             const fs = require('fs');
diff --git a/.github/workflows/on_closed_issues.yml b/.github/workflows/on_closed_issues.yml
index a6dadcd843d..ca815e4c07f 100644
--- a/.github/workflows/on_closed_issues.yml
+++ b/.github/workflows/on_closed_issues.yml
@@ -9,4 +9,10 @@ jobs:
       - uses: aws-actions/closed-issue-message@v1
         with:
           repo-token: "${{ secrets.GITHUB_TOKEN }}"
-          message: "Comments on closed issues are hard for our team to see."
+          message: |
+            ### ⚠️COMMENT VISIBILITY WARNING⚠️
+            This issue is now closed. Please be mindful that future comments are hard for our team to see.
+
+            If you need more assistance, please either tag a [team member](https://github.com/awslabs/aws-lambda-powertools-python/blob/develop/MAINTAINERS.md#current-maintainers) or open a new issue that references this one.
+
+            If you wish to keep having a conversation with other community members under this issue, feel free to do so.
diff --git a/.github/workflows/on_merged_pr.yml b/.github/workflows/on_merged_pr.yml
index 29b4ed27ad3..97029740cdb 100644
--- a/.github/workflows/on_merged_pr.yml
+++ b/.github/workflows/on_merged_pr.yml
@@ -1,3 +1,6 @@
+# Maintenance: Verify why we're having permissions issues even with write scope, then re-enable it.
+# logs: https://github.com/awslabs/aws-lambda-powertools-python/runs/7030238348?check_suite_focus=true
+
 on:
   pull_request:
     types:
      - closed
@@ -11,37 +14,14 @@ jobs:
   release_label_on_merge:
     if: github.event.pull_request.merged == true && github.event.pull_request.user.login != 'dependabot[bot]'
     runs-on: ubuntu-latest
+    permissions:
+      issues: write # required for new scoped token
+      pull-requests: write # required for new scoped token
     steps:
       - name: "Label PR related issue for release"
         uses: actions/github-script@v6
         with:
           github-token: ${{ secrets.GITHUB_TOKEN }}
           script: |
-            const prBody = context.payload.body;
-            const prNumber = context.payload.number;
-            const releaseLabel = process.env.RELEASE_LABEL;
-            const maintainersTeam = process.env.MAINTAINERS_TEAM
-
-            const RELATED_ISSUE_REGEX = /Issue number:.+(\d)/
-
-            const matcher = new RegExp(RELATED_ISSUE_REGEX)
-            const isMatch = matcher.exec(prBody)
-            if (isMatch != null) {
-              let relatedIssueNumber = isMatch[1]
-              console.info(`Auto-labeling related issue ${relatedIssueNumber} for release`)
-
-              await github.rest.issues.addLabels({
-                issue_number: relatedIssueNumber,
-                owner: context.repo.owner,
-                repo: context.repo.repo,
-                labels: [releaseLabel]
-              })
-            } else {
-              let msg = `${maintainersTeam} No related issues found. Please ensure '${RELEASE_LABEL}.' label is applied before releasing.`;
-              await github.rest.issues.createComment({
-                owner: context.repo.owner,
-                repo: context.repo.repo,
-                body: msg,
-                issue_number: prNumber,
-              });
-            }
+            const script = require('./.github/scripts/label_related_issue.js')
+            await script({github, context, core})
diff --git a/.github/workflows/on_opened_pr.yml b/.github/workflows/on_opened_pr.yml
index 578a50d252a..9a539976467 100644
--- a/.github/workflows/on_opened_pr.yml
+++ b/.github/workflows/on_opened_pr.yml
@@ -30,7 +30,7 @@ jobs:
         github-token: ${{ secrets.GITHUB_TOKEN }}
         # Maintenance: convert into a standalone JS like post_release.js
         script: |
-          const prBody = ${{ needs.get_pr_details.outputs.prBody }};
+          const prBody = "${{ needs.get_pr_details.outputs.prBody }}";
           const prNumber = ${{ needs.get_pr_details.outputs.prNumber }};
           const blockLabel = process.env.BLOCK_LABEL;
           const blockReasonLabel = process.env.BLOCK_REASON_LABEL;
@@ -42,7 +42,7 @@
           if (isMatch == null) {
             console.info(`No related issue found, maybe the author didn't use the template but there is one.`)
 
-            let msg = `⚠️ No related issues found. Please ensure there is an open issue related to this change to avoid significant delays or closure. ⚠️`;
+            let msg = "No related issues found. Please ensure there is an open issue related to this change to avoid significant delays or closure.";
             await github.rest.issues.createComment({
               owner: context.repo.owner,
               repo: context.repo.repo,
diff --git a/.github/workflows/python_docs.yml b/.github/workflows/python_docs.yml
index 295ecb334c8..3a6e15e5431 100644
--- a/.github/workflows/python_docs.yml
+++ b/.github/workflows/python_docs.yml
@@ -5,9 +5,10 @@ on:
     branches:
       - develop
     paths:
-      - 'docs/**'
-      - 'CHANGELOG.md'
-      - 'mkdocs.yml'
+      - "docs/**"
+      - "CHANGELOG.md"
+      - "mkdocs.yml"
+      - "examples/**"
 
 jobs:
   docs:
@@ -20,8 +21,15 @@ jobs:
         uses: actions/setup-python@v4
         with:
           python-version: "3.8"
+      # Maintenance: temporarily until we drop Python 3.6 and make cfn-lint a dev dependency
+      - name: Setup Cloud Formation Linter with Latest Version
+        uses: scottbrenner/cfn-lint-action@v2
       - name: Install dependencies
         run: make dev
+      - name: Lint documentation
+        run: |
+          make lint-docs
+          cfn-lint examples/**/*.yaml
       - name: Setup doc deploy
         run: |
           git config --global user.name Docs deploy
diff --git a/.gitignore b/.gitignore
index 5d28e3a615f..b776e1999c2 100644
--- a/.gitignore
+++ b/.gitignore
@@ -306,3 +306,4 @@ site/
 
 !docs/overrides/*.html
 !.github/workflows/lib
+examples/**/sam/.aws-sam
diff --git a/.markdownlint.yaml b/.markdownlint.yaml
new file mode 100644
index 00000000000..4d571206e07
--- /dev/null
+++ b/.markdownlint.yaml
@@ -0,0 +1,244 @@
+# Rules: https://github.com/DavidAnson/markdownlint/blob/main/doc/Rules.md
+
+# Default state for all rules
+default: true
+
+# Path to configuration file to extend
+extends: null
+
+# MD001/heading-increment/header-increment - Heading levels should only increment by one level at a time
+MD001: true
+
+# MD002/first-heading-h1/first-header-h1 - First heading should be a top-level heading
+# NOTE: We use h2 due to font size
+MD002: false
+
+# MD003/heading-style/header-style - Heading style
+MD003:
+  # Heading style
+  style: "consistent"
+
+# MD004/ul-style - Unordered list style
+MD004:
+  # List style
+  style: "consistent"
+
+# MD005/list-indent - Inconsistent indentation for list items at the same level
+MD005: true
+
+# MD006/ul-start-left - Consider starting bulleted lists at the beginning of the line
+MD006: true
+
+# MD007/ul-indent - Unordered list indentation
+MD007:
+  # Spaces for indent
+  indent: 4
+  # Whether to indent the first level of the list
+  start_indented: false
+  # Spaces for first level indent (when start_indented is set)
+  start_indent: 2
+
+# MD009/no-trailing-spaces - Trailing spaces
+MD009:
+  # Spaces for line break
+  br_spaces: 2
+  # Allow spaces for empty lines in list items
+  list_item_empty_lines: false
+  # Include unnecessary breaks
+  strict: false
+
+# MD010/no-hard-tabs - Hard tabs
+# NOTE: Mkdocs Material theme features like code annotations, tabbed content require it
+MD010: false
+
+# MD011/no-reversed-links - Reversed link syntax
+MD011: true
+
+# MD012/no-multiple-blanks - Multiple consecutive blank lines
+MD012:
+  # Consecutive blank lines
+  maximum: 1
+
+# MD013/line-length - Line length
+MD013:
+  # Number of characters
+  line_length: 380
+  # Number of characters for headings
+  heading_line_length: 80
+  # Number of characters for code blocks
+  code_block_line_length: 265
+  # Include code blocks
+  code_blocks: true
+  # Include tables
+  tables: false
+  # Include headings
+  headings: true
+  # Include headers (legacy alias for headings)
+  headers: true
+  # Strict length checking
+  strict: false
+  # Stern length checking
+  stern: false
+
+# MD014/commands-show-output - Dollar signs used before commands without showing output
+MD014: true
+
+# MD018/no-missing-space-atx - No space after hash on atx style heading
+MD018: true
+
+# MD019/no-multiple-space-atx - Multiple spaces after hash on atx style heading
+MD019: true
+
+# MD020/no-missing-space-closed-atx - No space inside hashes on closed atx style heading
+MD020: true
+
+# MD021/no-multiple-space-closed-atx - Multiple spaces inside hashes on closed atx style heading
+MD021: true
+
+# MD022/blanks-around-headings/blanks-around-headers - Headings should be surrounded by blank lines
+MD022:
+  # Blank lines above heading
+  lines_above: 1
+  # Blank lines below heading
+  lines_below: 1
+
+# MD023/heading-start-left/header-start-left - Headings must start at the beginning of the line
+MD023: true
+
+# MD024/no-duplicate-heading/no-duplicate-header - Multiple headings with the same content
+MD024:
+  # Only check sibling headings
+  allow_different_nesting: false
+  # Only check sibling headings
+  siblings_only: false
+
+# MD025/single-title/single-h1 - Multiple top-level headings in the same document
+MD025:
+  # Heading level
+  level: 1
+  # RegExp for matching title in front matter
+  front_matter_title: "^\\s*title\\s*[:=]"
+
+# MD026/no-trailing-punctuation - Trailing punctuation in heading
+MD026:
+  # Punctuation characters
+  punctuation: ".,;:!。,;:!"
+
+# MD027/no-multiple-space-blockquote - Multiple spaces after blockquote symbol
+MD027: true
+
+# MD028/no-blanks-blockquote - Blank line inside blockquote
+MD028: true
+
+# MD029/ol-prefix - Ordered list item prefix
+MD029:
+  # List style
+  style: "one_or_ordered"
+
+# MD030/list-marker-space - Spaces after list markers
+MD030:
+  # Spaces for single-line unordered list items
+  ul_single: 1
+  # Spaces for single-line ordered list items
+  ol_single: 1
+  # Spaces for multi-line unordered list items
+  ul_multi: 1
+  # Spaces for multi-line ordered list items
+  ol_multi: 1
+
+# MD031/blanks-around-fences - Fenced code blocks should be surrounded by blank lines
+MD031:
+  # Include list items
+  list_items: true
+
+# MD032/blanks-around-lists - Lists should be surrounded by blank lines
+MD032: true
+
+# MD033/no-inline-html - Inline HTML
+# NOTE: Some content like Logger '' triggers false positives
+MD033: false
+
+# MD034/no-bare-urls - Bare URL used
+MD034: true
+
+# MD035/hr-style - Horizontal rule style
+MD035:
+  # Horizontal rule style
+  style: "consistent"
+
+# MD036/no-emphasis-as-heading/no-emphasis-as-header - Emphasis used instead of a heading
+# NOTE: We use **** instead of yet another sub-heading that might not appear in the navigation.
+# this is a trade-off we make to avoid a gigantic right-navigation
+MD036: false
+
+# MD037/no-space-in-emphasis - Spaces inside emphasis markers
+MD037: true
+
+# MD038/no-space-in-code - Spaces inside code span elements
+# mkdocs-material requires these in tab content
+MD038: false
+
+# MD039/no-space-in-links - Spaces inside link text
+MD039: true
+
+# MD040/fenced-code-language - Fenced code blocks should have a language specified
+MD040: true
+
+# MD041/first-line-heading/first-line-h1 - First line in a file should be a top-level heading
+MD041:
+  # Heading level
+  level: 2
+  # RegExp for matching title in front matter
+  front_matter_title: "^\\s*title\\s*[:=]"
+
+# MD042/no-empty-links - No empty links
+# NOTE: Clipboard links like Lambda Layers use empty links
+MD042: false
+
+# MD043/required-headings/required-headers - Required heading structure
+# NOTE: Enforce our minimum headers across the docs
+MD043:
+  # List of headings
+  headings:
+    [
+      "*",
+      "## Key features",
+      "*",
+      "## Getting started",
+      "*",
+      "## Advanced",
+      "*",
+      "## Testing your code",
+      "*",
+    ]
+
+# MD044/proper-names - Proper names should have the correct capitalization
+MD044:
+  # List of proper names
+  names: []
+  # Include code blocks
+  code_blocks: true
+  # Include HTML elements
+  html_elements: true
+
+# MD045/no-alt-text - Images should have alternate text (alt text)
+MD045: true
+
+# MD046/code-block-style - Code block style
+# NOTE: Material theme's tabbed content uses indented code blocks while simple snippets use fenced ones; can't support both
+MD046: false
+
+# MD047/single-trailing-newline - Files should end with a single newline character
+MD047: true
+
+# MD048/code-fence-style - Code fence style
+MD048: false
+
+# MD051/link-fragments - Link fragments should be valid
+MD051: true
+
+# MD052/reference-links-images - Reference links and images should use a label that is defined
+MD052: true
+
+# MD053/link-image-reference-definitions - Link and image reference definitions should be needed
+MD053: true
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 61e98378017..8a614f78968 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -4,30 +4,39 @@
 # All checks can be run locally via `make pr`
 
 repos:
-  - repo: https://github.com/pre-commit/pre-commit-hooks
-    rev: v2.4.0
-    hooks:
-      - id: check-merge-conflict
-      - id: trailing-whitespace
-      - id: end-of-file-fixer
-      - id: check-toml
-  - repo: local
-    hooks:
-      - id: black
-        name: formatting::black
-        entry: poetry run black
-        language: system
-        types: [python]
-      - id: isort
-        name: formatting::isort
-        entry: poetry run isort
-        language: system
-        types: [python]
-  - repo: local
-    hooks:
-      - id: flake8
-        name: linting::flake8
-        entry: poetry run flake8
-        language: system
-        types: [python]
-        exclude: example
+    - repo: https://github.com/pre-commit/pre-commit-hooks
+      rev: v2.4.0
+      hooks:
+          - id: check-merge-conflict
+          - id: trailing-whitespace
+          - id: end-of-file-fixer
+          - id: check-toml
+    - repo: local
+      hooks:
+          - id: black
+            name: formatting::black
+            entry: poetry run black
+            language: system
+            types: [python]
+          - id: isort
+            name: formatting::isort
+            entry: poetry run isort
+            language: system
+            types: [python]
+    - repo: local
+      hooks:
+          - id: flake8
+            name: linting::flake8
+            entry: poetry run flake8
+            language: system
+            types: [python]
+    - repo: https://github.com/igorshubovych/markdownlint-cli
+      rev: "11c08644ce6df850480d98f628596446a526cbc6" # frozen: v0.31.1
+      hooks:
+          - id: markdownlint
+            args: ["--fix"]
+    - repo: https://github.com/aws-cloudformation/cfn-python-lint
+      rev: v0.61.1
+      hooks:
+          - id: cfn-python-lint
+            files: examples/.*\.(yaml|yml)$
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 1ca92b6a72a..f5b0cc7b5a8 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,9 +1,51 @@
+
+
 # Changelog
 
 All notable changes to this project will be documented in this file.
 
 This project follows [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) format for changes and adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
 
+## 1.26.3 - 2022-07-04
+
+### Bug Fixes
+
+* **ci:** improve msg visibility on closed issues
+* **ci:** disable merged_pr workflow
+* **ci:** merged_pr add issues write access
+* **ci:** quote prBody GH expr on_opened_pr
+* **ci:** remove utf-8 body in octokit body req
+* **ci:** reusable workflow secrets param
+* **logger:** support additional args for handlers when injecting lambda context decorator ([#1276](https://github.com/awslabs/aws-lambda-powertools-python/issues/1276))
+
+### Documentation
+
+* **lint:** add markdownlint rules and automation ([#1256](https://github.com/awslabs/aws-lambda-powertools-python/issues/1256))
+* **logger:** document enriching logs with logrecord attributes ([#1271](https://github.com/awslabs/aws-lambda-powertools-python/issues/1271))
+* **logger:** snippets split, improved, and lint ([#1262](https://github.com/awslabs/aws-lambda-powertools-python/issues/1262))
+* **metrics:** snippets split, improved, and lint ([#1272](https://github.com/awslabs/aws-lambda-powertools-python/issues/1272))
+* **tracer:** snippets split, improved, and lint ([#1261](https://github.com/awslabs/aws-lambda-powertools-python/issues/1261))
+* **tracer:** split and lint code snippets ([#1260](https://github.com/awslabs/aws-lambda-powertools-python/issues/1260))
+
+### Maintenance
+
+* add sam build gitignore
+* **documentation:** move to approach B for multiple IaC
+* **ci:** improve wording on closed issues action
+* **ci:** deactivate on_merged_pr workflow
+* **ci:** reactivate on_merged_pr workflow
+* **deps:** bump dependabot/fetch-metadata from 1.1.1 to 1.3.2 ([#1269](https://github.com/awslabs/aws-lambda-powertools-python/issues/1269))
+* **deps:** bump aws-xray-sdk from 2.9.0 to 2.10.0 ([#1270](https://github.com/awslabs/aws-lambda-powertools-python/issues/1270))
+* **deps:** bump dependabot/fetch-metadata from 1.3.2 to 1.3.3 ([#1273](https://github.com/awslabs/aws-lambda-powertools-python/issues/1273))
+* **deps-dev:** bump flake8-bugbear from 22.6.22 to 22.7.1 ([#1274](https://github.com/awslabs/aws-lambda-powertools-python/issues/1274))
+* **deps-dev:** bump flake8-bugbear from 22.4.25 to 22.6.22 ([#1258](https://github.com/awslabs/aws-lambda-powertools-python/issues/1258))
+* **deps-dev:** bump mypy-boto3-dynamodb from 1.24.0 to 1.24.12 ([#1255](https://github.com/awslabs/aws-lambda-powertools-python/issues/1255))
+* **deps-dev:** bump mypy-boto3-secretsmanager ([#1252](https://github.com/awslabs/aws-lambda-powertools-python/issues/1252))
+* **governance:** fix on_merged_pr workflow syntax
+* **governance:** warn message on closed issues
+* **layers:** bump to 21 for 1.26.2
+* **test-perf:** use pytest-benchmark to improve reliability ([#1250](https://github.com/awslabs/aws-lambda-powertools-python/issues/1250))
+
 ## 1.26.2 - 2022-06-16
 
 ### Bug Fixes
@@ -32,6 +74,7 @@ This project follows [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) fo
 * **metrics:** revert dimensions test before splitting
([#1243](https://github.com/awslabs/aws-lambda-powertools-python/issues/1243)) ## 1.26.1 - 2022-06-07 + ### Bug Fixes * **metrics:** raise SchemaValidationError for >8 metric dimensions ([#1240](https://github.com/awslabs/aws-lambda-powertools-python/issues/1240)) @@ -47,7 +90,6 @@ This project follows [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) fo * **deps-dev:** bump mypy from 0.950 to 0.960 ([#1224](https://github.com/awslabs/aws-lambda-powertools-python/issues/1224)) * **deps-dev:** bump mypy-boto3-secretsmanager from 1.23.0.post1 to 1.23.8 ([#1225](https://github.com/awslabs/aws-lambda-powertools-python/issues/1225)) - ## 1.26.0 - 2022-05-20 ### Bug Fixes @@ -115,6 +157,7 @@ This project follows [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) fo ### Bug Fixes * **deps**: correct py36 marker for jmespath + ## 1.25.8 - 2022-04-21 ### Bug Fixes @@ -365,6 +408,7 @@ Tenet update! We've updated **Idiomatic** tenet to **Progressive** to reflect th * BEFORE: **Idiomatic**. Utilities follow programming language idioms and language-specific best practices. * AFTER: **Progressive**. Utilities are designed to be incrementally adoptable for customers at any stage of their Serverless journey. They follow language idioms and their community’s common practices. + ### Bug Fixes * **ci:** change supported python version from 3.6.1 to 3.6.2, bump black ([#807](https://github.com/awslabs/aws-lambda-powertools-python/issues/807)) @@ -389,6 +433,7 @@ Tenet update! We've updated **Idiomatic** tenet to **Progressive** to reflect th * **idempotency:** add support for DynamoDB composite keys ([#808](https://github.com/awslabs/aws-lambda-powertools-python/issues/808)) * **tenets:** update Idiomatic tenet to Progressive ([#823](https://github.com/awslabs/aws-lambda-powertools-python/issues/823)) * **docs:** remove Lambda Layer version tag + ### Features * **apigateway:** add Router to allow large routing composition ([#645](https://github.com/awslabs/aws-lambda-powertools-python/issues/645)) @@ -424,6 +469,7 @@ Tenet update! We've updated **Idiomatic** tenet to **Progressive** to reflect th ### Documentation * add new public layer ARNs ([#746](https://github.com/awslabs/aws-lambda-powertools-python/issues/746)) + ### Maintenance * ignore constants in test cov ([#745](https://github.com/awslabs/aws-lambda-powertools-python/issues/745)) @@ -495,6 +541,7 @@ Tenet update! We've updated **Idiomatic** tenet to **Progressive** to reflect th * **deps-dev:** bump pytest from 6.2.4 to 6.2.5 ([#662](https://github.com/awslabs/aws-lambda-powertools-python/issues/662)) * **deps-dev:** bump mike from 0.6.0 to 1.0.1 ([#453](https://github.com/awslabs/aws-lambda-powertools-python/issues/453)) * **license:** add third party license to pyproject.toml ([#641](https://github.com/awslabs/aws-lambda-powertools-python/issues/641)) + ## 1.20.2 - 2021-09-02 ### Bug Fixes @@ -587,6 +634,7 @@ Tenet update! We've updated **Idiomatic** tenet to **Progressive** to reflect th * **feature-flags:** fix SAM infra, convert CDK to Python * **feature-flags:** fix sample feature name in evaluate method * **feature-flags:** add guidance when to use vs env vars vs parameters + ### Features * **api-gateway:** add support for custom serializer ([#568](https://github.com/awslabs/aws-lambda-powertools-python/issues/568)) @@ -607,6 +655,7 @@ Tenet update! 
We've updated **Idiomatic** tenet to **Progressive** to reflect th * **deps-dev:** bump mkdocs-material from 7.2.0 to 7.2.1 ([#566](https://github.com/awslabs/aws-lambda-powertools-python/issues/566)) * **deps-dev:** bump mkdocs-material from 7.1.11 to 7.2.0 ([#551](https://github.com/awslabs/aws-lambda-powertools-python/issues/551)) * **deps-dev:** bump flake8-black from 0.2.1 to 0.2.3 ([#541](https://github.com/awslabs/aws-lambda-powertools-python/issues/541)) + ## 1.18.1 - 2021-07-23 ### Bug Fixes @@ -713,9 +762,10 @@ Tenet update! We've updated **Idiomatic** tenet to **Progressive** to reflect th * **Parser**: Upgrade Pydantic to 1.8.2 due to CVE-2021-29510 ## 1.16.0 - 2021-05-17 + ### Features -- **data-classes(API Gateway, ALB):** New method to decode base64 encoded body ([#425](https://github.com/awslabs/aws-lambda-powertools-python/issues/425)) -- **data-classes(CodePipeline):** Support for CodePipeline job event and methods to handle artifacts more easily ([#416](https://github.com/awslabs/aws-lambda-powertools-python/issues/416)) +* **data-classes(API Gateway, ALB):** New method to decode base64 encoded body ([#425](https://github.com/awslabs/aws-lambda-powertools-python/issues/425)) +* **data-classes(CodePipeline):** Support for CodePipeline job event and methods to handle artifacts more easily ([#416](https://github.com/awslabs/aws-lambda-powertools-python/issues/416)) ## 1.15.1 - 2021-05-13 @@ -789,6 +839,7 @@ Tenet update! We've updated **Idiomatic** tenet to **Progressive** to reflect th * **Idempotency**: Error handling for missing idempotency key and `save_in_progress` errors ## 1.11.0 - 2021-03-05 + ### Fixed * **Tracer**: Lazy loads X-Ray SDK to increase perf by 75% for those not instantiating Tracer @@ -841,241 +892,266 @@ No changes. Bumped version to trigger new pipeline build for layer publishing. ## 1.10.0 - 2021-01-18 ### Added -- **Utilities**: Added support for AppConfig in Parameters utility -- **Logger**: Added support for `extra` parameter to add additional root fields when logging messages -- **Logger**: Added support to Pytest Live Log feat. via feature toggle `POWERTOOLS_LOG_DEDUPLICATION_DISABLED` -- **Tracer**: Added support to disable auto-capturing response and exception as metadata -- **Utilities**: Added support to handle custom string/integer formats in JSON Schema in Validator utility -- **Install**: Added new Lambda Layer with all extra dependencies installed, available in Serverless Application Repository (SAR) +* **Utilities**: Added support for AppConfig in Parameters utility +* **Logger**: Added support for `extra` parameter to add additional root fields when logging messages +* **Logger**: Added support to Pytest Live Log feat. 
via feature toggle `POWERTOOLS_LOG_DEDUPLICATION_DISABLED` +* **Tracer**: Added support to disable auto-capturing response and exception as metadata +* **Utilities**: Added support to handle custom string/integer formats in JSON Schema in Validator utility +* **Install**: Added new Lambda Layer with all extra dependencies installed, available in Serverless Application Repository (SAR) ### Fixed -- **Docs**: Added missing SNS parser model -- **Docs**: Added new environment variables for toggling features in Logger and Tracer: `POWERTOOLS_LOG_DEDUPLICATION_DISABLED`, `POWERTOOLS_TRACER_CAPTURE_RESPONSE`, `POWERTOOLS_TRACER_CAPTURE_ERROR` -- **Docs**: Fixed incorrect import for Cognito data classes in Event Sources utility +* **Docs**: Added missing SNS parser model +* **Docs**: Added new environment variables for toggling features in Logger and Tracer: `POWERTOOLS_LOG_DEDUPLICATION_DISABLED`, `POWERTOOLS_TRACER_CAPTURE_RESPONSE`, `POWERTOOLS_TRACER_CAPTURE_ERROR` +* **Docs**: Fixed incorrect import for Cognito data classes in Event Sources utility ## 1.9.1 - 2020-12-21 ### Fixed -- **Logger**: Bugfix to prevent parent loggers with the same name being configured more than once +* **Logger**: Bugfix to prevent parent loggers with the same name being configured more than once ### Added -- **Docs**: Add clarification to Tracer docs for how `capture_method` decorator can cause function responses to be read and serialized. -- **Utilities**: Added equality to ease testing Event source data classes -- **Package**: Added `py.typed` for initial work needed for PEP 561 compliance +* **Docs**: Add clarification to Tracer docs for how `capture_method` decorator can cause function responses to be read and serialized. +* **Utilities**: Added equality to ease testing Event source data classes +* **Package**: Added `py.typed` for initial work needed for PEP 561 compliance ## 1.9.0 - 2020-12-04 ### Added -- **Utilities**: Added Kinesis, S3, CloudWatch Logs, Application Load Balancer, and SES support in `Parser` -- **Docs**: Sidebar menu are now always expanded +* **Utilities**: Added Kinesis, S3, CloudWatch Logs, Application Load Balancer, and SES support in `Parser` +* **Docs**: Sidebar menu are now always expanded ### Fixed -- **Docs**: Broken link to GitHub to homepage +* **Docs**: Broken link to GitHub to homepage ## 1.8.0 - 2020-11-20 ### Added -- **Utilities**: Added support for new EventBridge Replay field in `Parser` and `Event source data classes` -- **Utilities**: Added SNS support in `Parser` -- **Utilities**: Added API Gateway HTTP API data class support for new IAM and Lambda authorizer in `Event source data classes` -- **Docs**: Add new FAQ section for Logger on how to enable debug logging for boto3 -- **Docs**: Add explicit minimal set of permissions required to use Layers provided by Serverless Application Repository (SAR) +* **Utilities**: Added support for new EventBridge Replay field in `Parser` and `Event source data classes` +* **Utilities**: Added SNS support in `Parser` +* **Utilities**: Added API Gateway HTTP API data class support for new IAM and Lambda authorizer in `Event source data classes` +* **Docs**: Add new FAQ section for Logger on how to enable debug logging for boto3 +* **Docs**: Add explicit minimal set of permissions required to use Layers provided by Serverless Application Repository (SAR) ### Fixed -- **Docs**: Fix typo in Dataclasses example for SES when fetching common email headers +* **Docs**: Fix typo in Dataclasses example for SES when fetching common email 
headers ## 1.7.0 - 2020-10-26 ### Added -- **Utilities**: Add new `Parser` utility to provide parsing and deep data validation using Pydantic Models -- **Utilities**: Add case insensitive header lookup, and Cognito custom auth triggers to `Event source data classes` +* **Utilities**: Add new `Parser` utility to provide parsing and deep data validation using Pydantic Models +* **Utilities**: Add case insensitive header lookup, and Cognito custom auth triggers to `Event source data classes` ### Fixed -- **Logger**: keeps Lambda root logger handler, and add log filter instead to prevent child log records duplication -- **Docs**: Improve wording on adding log keys conditionally +* **Logger**: keeps Lambda root logger handler, and add log filter instead to prevent child log records duplication +* **Docs**: Improve wording on adding log keys conditionally ## 1.6.1 - 2020-09-23 ### Fixed -- **Utilities**: Fix issue with boolean values in DynamoDB stream event data class. +* **Utilities**: Fix issue with boolean values in DynamoDB stream event data class. ## 1.6.0 - 2020-09-22 ### Added -- **Metrics**: Support adding multiple metric values to a single metric name -- **Utilities**: Add new `Validator` utility to validate inbound events and responses using JSON Schema -- **Utilities**: Add new `Event source data classes` utility to easily describe event schema of popular event sources -- **Docs**: Add new `Testing your code` section to both Logger and Metrics page, and content width is now wider -- **Tracer**: Support for automatically disable Tracer when running a Chalice app +* **Metrics**: Support adding multiple metric values to a single metric name +* **Utilities**: Add new `Validator` utility to validate inbound events and responses using JSON Schema +* **Utilities**: Add new `Event source data classes` utility to easily describe event schema of popular event sources +* **Docs**: Add new `Testing your code` section to both Logger and Metrics page, and content width is now wider +* **Tracer**: Support for automatically disable Tracer when running a Chalice app ### Fixed -- **Docs**: Improve wording on log sampling feature in Logger, and removed duplicate content on main page -- **Utilities**: Remove DeleteMessageBatch API call when there are no messages to delete +* **Docs**: Improve wording on log sampling feature in Logger, and removed duplicate content on main page +* **Utilities**: Remove DeleteMessageBatch API call when there are no messages to delete ## 1.5.0 - 2020-09-04 ### Added -- **Logger**: Add `xray_trace_id` to log output to improve integration with CloudWatch Service Lens -- **Logger**: Allow reordering of logged output -- **Utilities**: Add new `SQS batch processing` utility to handle partial failures in processing message batches -- **Utilities**: Add typing utility providing static type for lambda context object -- **Utilities**: Add `transform=auto` in parameters utility to deserialize parameter values based on the key name +* **Logger**: Add `xray_trace_id` to log output to improve integration with CloudWatch Service Lens +* **Logger**: Allow reordering of logged output +* **Utilities**: Add new `SQS batch processing` utility to handle partial failures in processing message batches +* **Utilities**: Add typing utility providing static type for lambda context object +* **Utilities**: Add `transform=auto` in parameters utility to deserialize parameter values based on the key name ### Fixed -- **Logger**: The value of `json_default` formatter is no longer written to logs +* 
**Logger**: The value of `json_default` formatter is no longer written to logs ## 1.4.0 - 2020-08-25 ### Added -- **All**: Official Lambda Layer via [Serverless Application Repository](https://serverlessrepo.aws.amazon.com/applications/eu-west-1/057560766410/aws-lambda-powertools-python-layer) -- **Tracer**: `capture_method` and `capture_lambda_handler` now support **capture_response=False** parameter to prevent Tracer to capture response as metadata to allow customers running Tracer with sensitive workloads +* **All**: Official Lambda Layer via [Serverless Application Repository](https://serverlessrepo.aws.amazon.com/applications/eu-west-1/057560766410/aws-lambda-powertools-python-layer) +* **Tracer**: `capture_method` and `capture_lambda_handler` now support **capture_response=False** parameter to prevent Tracer to capture response as metadata to allow customers running Tracer with sensitive workloads ### Fixed -- **Metrics**: Cold start metric is now completely separate from application metrics dimensions, making it easier and cheaper to visualize. - - This is a breaking change if you were graphing/alerting on both application metrics with the same name to compensate this previous malfunctioning - - Marked as bugfix as this is the intended behaviour since the beginning, as you shouldn't have the same application metric with different dimensions -- **Utilities**: SSMProvider within Parameters utility now have decrypt and recursive parameters correctly defined to support autocompletion +* **Metrics**: Cold start metric is now completely separate from application metrics dimensions, making it easier and cheaper to visualize. + * This is a breaking change if you were graphing/alerting on both application metrics with the same name to compensate this previous malfunctioning + * Marked as bugfix as this is the intended behaviour since the beginning, as you shouldn't have the same application metric with different dimensions +* **Utilities**: SSMProvider within Parameters utility now have decrypt and recursive parameters correctly defined to support autocompletion ### Added -- **Tracer**: capture_lambda_handler and capture_method decorators now support `capture_response` parameter to not include function's response as part of tracing metadata +* **Tracer**: capture_lambda_handler and capture_method decorators now support `capture_response` parameter to not include function's response as part of tracing metadata ## 1.3.1 - 2020-08-22 + ### Fixed -- **Tracer**: capture_method decorator did not properly handle nested context managers +* **Tracer**: capture_method decorator did not properly handle nested context managers ## 1.3.0 - 2020-08-21 + ### Added -- **Utilities**: Add new `parameters` utility to retrieve a single or multiple parameters from SSM Parameter Store, Secrets Manager, DynamoDB, or your very own +* **Utilities**: Add new `parameters` utility to retrieve a single or multiple parameters from SSM Parameter Store, Secrets Manager, DynamoDB, or your very own ## 1.2.0 - 2020-08-20 + ### Added -- **Tracer**: capture_method decorator now supports generator functions (including context managers) +* **Tracer**: capture_method decorator now supports generator functions (including context managers) ## 1.1.3 - 2020-08-18 + ### Fixed -- **Logger**: Logs emitted twice, structured and unstructured, due to Lambda configuring the root handler +* **Logger**: Logs emitted twice, structured and unstructured, due to Lambda configuring the root handler ## 1.1.2 - 2020-08-16 + ### Fixed -- **Docs**: 
Clarify confusion on Tracer reuse and `auto_patch=False` statement -- **Logger**: Autocomplete for log statements in PyCharm +* **Docs**: Clarify confusion on Tracer reuse and `auto_patch=False` statement +* **Logger**: Autocomplete for log statements in PyCharm ## 1.1.1 - 2020-08-14 + ### Fixed -- **Logger**: Regression on `Logger` level not accepting `int` i.e. `Logger(level=logging.INFO)` +* **Logger**: Regression on `Logger` level not accepting `int` i.e. `Logger(level=logging.INFO)` ## 1.1.0 - 2020-08-14 + ### Added -- **Logger**: Support for logger inheritance with `child` parameter +* **Logger**: Support for logger inheritance with `child` parameter ### Fixed -- **Logger**: Log level is now case insensitive via params and env var +* **Logger**: Log level is now case insensitive via params and env var ## 1.0.2 - 2020-07-16 + ### Fixed -- **Tracer**: Correct AWS X-Ray SDK dependency to support 2.5.0 and higher +* **Tracer**: Correct AWS X-Ray SDK dependency to support 2.5.0 and higher ## 1.0.1 - 2020-07-06 + ### Fixed -- **Logger**: Fix a bug with `inject_lambda_context` causing existing Logger keys to be overridden if `structure_logs` was called before +* **Logger**: Fix a bug with `inject_lambda_context` causing existing Logger keys to be overridden if `structure_logs` was called before ## 1.0.0 - 2020-06-18 + ### Added -- **Metrics**: `add_metadata` method to add any metric metadata you'd like to ease finding metric related data via CloudWatch Logs -- Set status as General Availability +* **Metrics**: `add_metadata` method to add any metric metadata you'd like to ease finding metric related data via CloudWatch Logs +* Set status as General Availability ## 0.11.0 - 2020-06-08 + ### Added -- Imports can now be made from top level of module, e.g.: `from aws_lambda_powertools import Logger, Metrics, Tracer` +* Imports can now be made from top level of module, e.g.: `from aws_lambda_powertools import Logger, Metrics, Tracer` ### Fixed -- **Metrics**: Fix a bug with Metrics causing an exception to be thrown when logging metrics if dimensions were not explicitly added. +* **Metrics**: Fix a bug with Metrics causing an exception to be thrown when logging metrics if dimensions were not explicitly added. ### Changed -- **Metrics**: No longer throws exception by default in case no metrics are emitted when using the log_metrics decorator. +* **Metrics**: No longer throws exception by default in case no metrics are emitted when using the log_metrics decorator. 
 ## 0.10.0 - 2020-06-08
+
 ### Added
-- **Metrics**: `capture_cold_start_metric` parameter added to `log_metrics` decorator
-- **Metrics**: Optional `namespace` and `service` parameters added to Metrics constructor to more closely resemble other core utils
+* **Metrics**: `capture_cold_start_metric` parameter added to `log_metrics` decorator
+* **Metrics**: Optional `namespace` and `service` parameters added to Metrics constructor to more closely resemble other core utils
 
 ### Changed
-- **Metrics**: Default dimension is now created based on `service` parameter or `POWERTOOLS_SERVICE_NAME` env var
+* **Metrics**: Default dimension is now created based on `service` parameter or `POWERTOOLS_SERVICE_NAME` env var
 
 ### Deprecated
-- **Metrics**: `add_namespace` method deprecated in favor of using `namespace` parameter to Metrics constructor or `POWERTOOLS_METRICS_NAMESPACE` env var
+* **Metrics**: `add_namespace` method deprecated in favor of using `namespace` parameter to Metrics constructor or `POWERTOOLS_METRICS_NAMESPACE` env var
 
 ## 0.9.5 - 2020-06-02
+
 ### Fixed
-- **Metrics**: Coerce non-string dimension values to string
-- **Logger**: Correct `cold_start`, `function_memory_size` values from string to bool and int respectively
+* **Metrics**: Coerce non-string dimension values to string
+* **Logger**: Correct `cold_start`, `function_memory_size` values from string to bool and int respectively
 
 ## 0.9.4 - 2020-05-29
+
 ### Fixed
-- **Metrics**: Fix issue where metrics were not correctly flushed, and cleared on every invocation
+* **Metrics**: Fix issue where metrics were not correctly flushed, and cleared on every invocation
 
 ## 0.9.3 - 2020-05-16
+
 ### Fixed
-- **Tracer**: Fix Runtime Error for nested sync due to incorrect loop usage
+* **Tracer**: Fix Runtime Error for nested sync due to incorrect loop usage
 
 ## 0.9.2 - 2020-05-14
+
 ### Fixed
-- **Tracer**: Import aiohttp lazily so it's not a hard dependency
+* **Tracer**: Import aiohttp lazily so it's not a hard dependency
 
 ## 0.9.0 - 2020-05-12
+
 ### Added
-- **Tracer**: Support for async functions in `Tracer` via `capture_method` decorator
-- **Tracer**: Support for `aiohttp` via `aiohttp_trace_config` trace config
-- **Tracer**: Support for patching specific modules via `patch_modules` param
-- **Tracer**: Document escape hatch mechanisms via `tracer.provider`
+* **Tracer**: Support for async functions in `Tracer` via `capture_method` decorator
+* **Tracer**: Support for `aiohttp` via `aiohttp_trace_config` trace config
+* **Tracer**: Support for patching specific modules via `patch_modules` param
+* **Tracer**: Document escape hatch mechanisms via `tracer.provider`
 
 ## 0.8.1 - 2020-05-01
+
 ### Fixed
+
 * **Metrics**: Fix metric unit casting logic if one passes plain string (value or key)
 * **Metrics**: Fix `MetricUnit` enum values for
-  - `BytesPerSecond`
-  - `KilobytesPerSecond`
-  - `MegabytesPerSecond`
-  - `GigabytesPerSecond`
-  - `TerabytesPerSecond`
-  - `BitsPerSecond`
-  - `KilobitsPerSecond`
-  - `MegabitsPerSecond`
-  - `GigabitsPerSecond`
-  - `TerabitsPerSecond`
-  - `CountPerSecond`
+    * `BytesPerSecond`
+    * `KilobytesPerSecond`
+    * `MegabytesPerSecond`
+    * `GigabytesPerSecond`
+    * `TerabytesPerSecond`
+    * `BitsPerSecond`
+    * `KilobitsPerSecond`
+    * `MegabitsPerSecond`
+    * `GigabitsPerSecond`
+    * `TerabitsPerSecond`
+    * `CountPerSecond`
 
 ## 0.8.0 - 2020-04-24
+
 ### Added
-- **Logger**: Introduced `Logger` class for structured logging as a replacement for `logger_setup`
-- **Logger**: Introduced `Logger.inject_lambda_context` decorator as a replacement for `logger_inject_lambda_context`
+* **Logger**: Introduced `Logger` class for structured logging as a replacement for `logger_setup`
+* **Logger**: Introduced `Logger.inject_lambda_context` decorator as a replacement for `logger_inject_lambda_context`
 
 ### Removed
-- **Logger**: Raise `DeprecationWarning` exception for both `logger_setup`, `logger_inject_lambda_context`
+* **Logger**: Raise `DeprecationWarning` exception for both `logger_setup`, `logger_inject_lambda_context`
 
 ## 0.7.0 - 2020-04-20
+
 ### Added
-- **Middleware factory**: Introduced Middleware Factory to build your own middleware via `lambda_handler_decorator`
+* **Middleware factory**: Introduced Middleware Factory to build your own middleware via `lambda_handler_decorator`
 
 ### Fixed
-- **Metrics**: Fixed metrics dimensions not being included correctly in EMF
+* **Metrics**: Fixed metrics dimensions not being included correctly in EMF
 
 ## 0.6.3 - 2020-04-09
+
 ### Fixed
-- **Logger**: Fix `log_metrics` decorator logic not calling the decorated function, and exception handling
+* **Logger**: Fix `log_metrics` decorator logic not calling the decorated function, and exception handling
 
 ## 0.6.1 - 2020-04-08
+
 ### Added
-- **Metrics**: Introduces Metrics middleware to utilise CloudWatch Embedded Metric Format
+* **Metrics**: Introduces Metrics middleware to utilise CloudWatch Embedded Metric Format
 
 ### Deprecated
-- **Metrics**: Added deprecation warning for `log_metrics`
+* **Metrics**: Added deprecation warning for `log_metrics`
 
 ## 0.5.0 - 2020-02-20
+
 ### Added
-- **Logger**: Introduced log sampling for debug - Thanks to [Danilo's contribution](https://github.com/awslabs/aws-lambda-powertools/pull/7)
+* **Logger**: Introduced log sampling for debug - Thanks to [Danilo's contribution](https://github.com/awslabs/aws-lambda-powertools/pull/7)
 
 ## 0.1.0 - 2019-11-15
+
 ### Added
-- Public beta release
+* Public beta release
diff --git a/Makefile b/Makefile
index 0ee0ee76fbd..6173e3e310d 100644
--- a/Makefile
+++ b/Makefile
@@ -10,11 +10,17 @@ dev:
 	pre-commit install
 
 format:
-	poetry run isort aws_lambda_powertools tests
-	poetry run black aws_lambda_powertools tests
+	poetry run isort aws_lambda_powertools tests examples
+	poetry run black aws_lambda_powertools tests examples
 
 lint: format
-	poetry run flake8 aws_lambda_powertools/* tests/*
+	poetry run flake8 aws_lambda_powertools tests examples
+
+lint-docs:
+	docker run -v ${PWD}:/markdown 06kellyjac/markdownlint-cli "docs"
+
+lint-docs-fix:
+	docker run -v ${PWD}:/markdown 06kellyjac/markdownlint-cli --fix "docs"
 
 test:
 	poetry run pytest -m "not perf" --cov=aws_lambda_powertools --cov-report=xml
@@ -29,7 +35,7 @@ coverage-html:
 pre-commit:
 	pre-commit run --show-diff-on-failure
 
-pr: lint mypy pre-commit test security-baseline complexity-baseline
+pr: lint lint-docs mypy pre-commit test security-baseline complexity-baseline
 
 build: pr
 	poetry build
diff --git a/aws_lambda_powertools/logging/formatter.py b/aws_lambda_powertools/logging/formatter.py
index becfc9de85c..1f01015051c 100644
--- a/aws_lambda_powertools/logging/formatter.py
+++ b/aws_lambda_powertools/logging/formatter.py
@@ -1,3 +1,4 @@
+import inspect
 import json
 import logging
 import os
@@ -286,3 +287,7 @@ def _strip_none_records(records: Dict[str, Any]) -> Dict[str, Any]:
 
 
 JsonFormatter = LambdaPowertoolsFormatter  # alias to previous formatter
+
+
+# Fetch current and future parameters from PowertoolsFormatter that should be reserved
+RESERVED_FORMATTER_CUSTOM_KEYS: List[str] = inspect.getfullargspec(LambdaPowertoolsFormatter).args[1:]
diff --git a/aws_lambda_powertools/logging/logger.py b/aws_lambda_powertools/logging/logger.py
index 05fd6c98e04..f70224cabae 100644
--- a/aws_lambda_powertools/logging/logger.py
+++ b/aws_lambda_powertools/logging/logger.py
@@ -12,7 +12,7 @@
 from ..shared.functions import resolve_env_var_choice, resolve_truthy_env_var_choice
 from .exceptions import InvalidLoggerSamplingRateError
 from .filters import SuppressFilter
-from .formatter import BasePowertoolsFormatter, LambdaPowertoolsFormatter
+from .formatter import RESERVED_FORMATTER_CUSTOM_KEYS, BasePowertoolsFormatter, LambdaPowertoolsFormatter
 from .lambda_context import build_lambda_context_model
 
 logger = logging.getLogger(__name__)
@@ -82,7 +82,7 @@ class Logger(logging.Logger):  # lgtm [py/missing-call-to-init]
     datefmt: str, optional
         String directives (strftime) to format log timestamp using `time`, by default it uses RFC
         3339.
-    use_datetime_directive: str, optional
+    use_datetime_directive: bool, optional
         Interpret `datefmt` as a format string for `datetime.datetime.strftime`, rather than
         `time.strftime`.
 
@@ -335,7 +335,7 @@ def handler(event, context):
         )
 
         @functools.wraps(lambda_handler)
-        def decorate(event, context, **kwargs):
+        def decorate(event, context, *args, **kwargs):
             lambda_context = build_lambda_context_model(context)
             cold_start = _is_cold_start()
 
@@ -351,7 +351,7 @@ def decorate(event, context, **kwargs):
                 logger.debug("Event received")
                 self.info(getattr(event, "raw_event", event))
 
-            return lambda_handler(event, context)
+            return lambda_handler(event, context, *args, **kwargs)
 
         return decorate
 
@@ -368,7 +368,7 @@ def registered_handler(self) -> logging.Handler:
         return handlers[0]
 
     @property
-    def registered_formatter(self) -> PowertoolsFormatter:
+    def registered_formatter(self) -> BasePowertoolsFormatter:
         """Convenience property to access logger formatter"""
         return self.registered_handler.formatter  # type: ignore
 
@@ -395,7 +395,15 @@ def structure_logs(self, append: bool = False, **keys):
         is_logger_preconfigured = getattr(self._logger, "init", False)
         if not is_logger_preconfigured:
             formatter = self.logger_formatter or LambdaPowertoolsFormatter(**log_keys)  # type: ignore
-            return self.registered_handler.setFormatter(formatter)
+            self.registered_handler.setFormatter(formatter)
+
+            # when using a custom Lambda Powertools Formatter
+            # standard and custom keys that are not Powertools Formatter parameters should be appended
+            # and custom keys that might happen to be Powertools Formatter parameters should be discarded
+            # this prevents adding them as custom keys, for example, `json_default=`
+            # see https://github.com/awslabs/aws-lambda-powertools-python/issues/1263
+            custom_keys = {k: v for k, v in log_keys.items() if k not in RESERVED_FORMATTER_CUSTOM_KEYS}
+            return self.registered_formatter.append_keys(**custom_keys)
 
         # Mode 2 (legacy)
         if append:
diff --git a/docs/changelog.md b/docs/changelog.md
index c2705ba58cb..e313bea09a9 100644
--- a/docs/changelog.md
+++ b/docs/changelog.md
@@ -1,2 +1,4 @@
 [comment]: <> (Includes Changelog content entire file as a snippet)
+
+
 --8<-- "CHANGELOG.md"
diff --git a/docs/core/event_handler/api_gateway.md b/docs/core/event_handler/api_gateway.md
index 4f86dc8fdf3..cf99b615a80 100644
--- a/docs/core/event_handler/api_gateway.md
+++ b/docs/core/event_handler/api_gateway.md
@@ -5,7 +5,7 @@ description: Core utility
 
 Event handler for Amazon API Gateway REST and HTTP APIs, and Application Load Balancer (ALB).
 
-### Key Features
+## Key Features
 
 * Lightweight routing to reduce boilerplate for API Gateway REST/HTTP API and ALB
 * Seamless support for CORS, binary and Gzip compression
@@ -18,49 +18,54 @@ Event handler for Amazon API Gateway REST and HTTP APIs, and Application Load Balancer (ALB).
 
 ### Required resources
 
-You must have an existing [API Gateway Proxy integration](https://docs.aws.amazon.com/apigateway/latest/developerguide/set-up-lambda-proxy-integrations.html){target="_blank"} or [ALB](https://docs.aws.amazon.com/elasticloadbalancing/latest/application/lambda-functions.html){target="_blank"} configured to invoke your Lambda function. There is no additional permissions or dependencies required to use this utility.
+You must have an existing [API Gateway Proxy integration](https://docs.aws.amazon.com/apigateway/latest/developerguide/set-up-lambda-proxy-integrations.html){target="_blank"} or [ALB](https://docs.aws.amazon.com/elasticloadbalancing/latest/application/lambda-functions.html){target="_blank"} configured to invoke your Lambda function. This is the sample infrastructure for API Gateway we are using for the examples in this documentation.
+
+???+ info "No additional permissions or dependencies are required to use this utility."
+
 ```yaml title="AWS Serverless Application Model (SAM) example"
-AWSTemplateFormatVersion: '2010-09-09'
+AWSTemplateFormatVersion: "2010-09-09"
 Transform: AWS::Serverless-2016-10-31
 Description: Hello world event handler API Gateway
 
 Globals:
-  Api:
-    TracingEnabled: true
-    Cors: # see CORS section
-      AllowOrigin: "'https://example.com'"
-      AllowHeaders: "'Content-Type,Authorization,X-Amz-Date'"
-      MaxAge: "'300'"
-    BinaryMediaTypes: # see Binary responses section
-      - '*~1*' # converts to */* for any binary type
-  Function:
-    Timeout: 5
-    Runtime: python3.8
-    Tracing: Active
-    Environment:
-      Variables:
-        LOG_LEVEL: INFO
-        POWERTOOLS_LOGGER_SAMPLE_RATE: 0.1
-        POWERTOOLS_LOGGER_LOG_EVENT: true
-        POWERTOOLS_METRICS_NAMESPACE: MyServerlessApplication
-        POWERTOOLS_SERVICE_NAME: my_api-service
+    Api:
+        TracingEnabled: true
+        Cors: # see CORS section
+            AllowOrigin: "'https://example.com'"
+            AllowHeaders: "'Content-Type,Authorization,X-Amz-Date'"
+            MaxAge: "'300'"
+        BinaryMediaTypes: # see Binary responses section
+            - "*~1*" # converts to */* for any binary type
+    Function:
+        Timeout: 5
+        Runtime: python3.8
+        Tracing: Active
+        Environment:
+            Variables:
+                LOG_LEVEL: INFO
+                POWERTOOLS_LOGGER_SAMPLE_RATE: 0.1
+                POWERTOOLS_LOGGER_LOG_EVENT: true
+                POWERTOOLS_METRICS_NAMESPACE: MyServerlessApplication
+                POWERTOOLS_SERVICE_NAME: my_api-service
 
 Resources:
-  ApiFunction:
-    Type: AWS::Serverless::Function
-    Properties:
-      Handler: app.lambda_handler
-      CodeUri: api_handler/
-      Description: API handler function
-      Events:
-        ApiEvent:
-          Type: Api
-          Properties:
-            Path: /{proxy+} # Send requests on any path to the lambda function
-            Method: ANY # Send requests using any http method to the lambda function
+    ApiFunction:
+        Type: AWS::Serverless::Function
+        Properties:
+            Handler: app.lambda_handler
+            CodeUri: api_handler/
+            Description: API handler function
+            Events:
+                ApiEvent:
+                    Type: Api
+                    Properties:
+                        # NOTE: this is a catch-all rule to simplify the documentation.
+                        # explicit routes and methods are recommended for prod instead
+                        # for example, Path: /hello, Method: GET
+                        Path: /{proxy+} # Send requests on any path to the lambda function
+                        Method: ANY # Send requests using any http method to the lambda function
 ```
 
 ### Event Resolvers
@@ -355,7 +360,9 @@ You can also combine nested paths with greedy regex to catch in between routes.
     ...
     }
    ```
+
 ### HTTP Methods
+
 You can use named decorators to specify the HTTP method that should be handled in your functions. As well as the `get` method already shown above, you can use `post`, `put`, `patch`, and `delete`.
@@ -487,7 +494,6 @@ def lambda_handler(event, context):
     return app.resolve(event, context)
 ```
 
-
 ### Handling not found routes
 
 By default, we return `404` for any unmatched route.
@@ -528,7 +534,6 @@ def lambda_handler(event, context):
     return app.resolve(event, context)
 ```
 
-
 ### Exception handling
 
 You can use **`exception_handler`** decorator with any Python exception. This allows you to handle a common exception outside your route, for example validation errors.
@@ -754,13 +759,13 @@ For convenience, these are the default values when using `CORSConfig` to enable
 ???+ warning
     Always configure `allow_origin` when using in production.
 
-Key | Value | Note
-------------------------------------------------- | --------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------
-**[allow_origin](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Access-Control-Allow-Origin){target="_blank"}**: `str` | `*` | Only use the default value for development. **Never use `*` for production** unless your use case requires it
-**[allow_headers](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Access-Control-Allow-Headers){target="_blank"}**: `List[str]` | `[Authorization, Content-Type, X-Amz-Date, X-Api-Key, X-Amz-Security-Token]` | Additional headers will be appended to the default list for your convenience
-**[expose_headers](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Access-Control-Expose-Headers){target="_blank"}**: `List[str]` | `[]` | Any additional header beyond the [safe listed by CORS specification](https://developer.mozilla.org/en-US/docs/Glossary/CORS-safelisted_response_header){target="_blank"}.
-**[max_age](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Access-Control-Max-Age){target="_blank"}**: `int` | `` | Only for pre-flight requests if you choose to have your function to handle it instead of API Gateway
-**[allow_credentials](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Access-Control-Allow-Credentials){target="_blank"}**: `bool` | `False` | Only necessary when you need to expose cookies, authorization headers or TLS client certificates.
+| Key | Value | Note |
+| --- | --- | --- |
+| **[allow_origin](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Access-Control-Allow-Origin){target="_blank"}**: `str` | `*` | Only use the default value for development. **Never use `*` for production** unless your use case requires it |
+| **[allow_headers](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Access-Control-Allow-Headers){target="_blank"}**: `List[str]` | `[Authorization, Content-Type, X-Amz-Date, X-Api-Key, X-Amz-Security-Token]` | Additional headers will be appended to the default list for your convenience |
+| **[expose_headers](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Access-Control-Expose-Headers){target="_blank"}**: `List[str]` | `[]` | Any additional header beyond the [safe listed by CORS specification](https://developer.mozilla.org/en-US/docs/Glossary/CORS-safelisted_response_header){target="_blank"}. |
+| **[max_age](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Access-Control-Max-Age){target="_blank"}**: `int` | `` | Only for pre-flight requests if you choose to have your function to handle it instead of API Gateway |
+| **[allow_credentials](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Access-Control-Allow-Credentials){target="_blank"}**: `bool` | `False` | Only necessary when you need to expose cookies, authorization headers or TLS client certificates. |
 
 ### Fine grained responses
@@ -1132,7 +1137,6 @@ This sample project contains a Users function with two distinct set of routes, `
 
 === "Project layout"
 
-
     ```python hl_lines="1 8 10 12-15"
     .
     ├── Pipfile # project app & dev dependencies; poetry, pipenv, etc.
@@ -1308,7 +1312,7 @@ _**Downsides**_
 
 * **Cold starts**. Frequent deployments and/or high load can diminish the benefit of monolithic functions depending on your latency requirements, due to [Lambda scaling model](https://docs.aws.amazon.com/lambda/latest/dg/invocation-scaling.html){target="_blank"}. Always load test to pragmatically balance between your customer experience and development cognitive load.
 * **Granular security permissions**. The micro function approach enables you to use fine-grained permissions & access controls, separate external dependencies & code signing at the function level. Conversely, you could have multiple functions while duplicating the final code artifact in a monolithic approach.
-  - Regardless, least privilege can be applied to either approaches.
+    * Regardless, least privilege can be applied to either approach.
 * **Higher risk per deployment**. A misconfiguration or invalid import can cause disruption if not caught earlier in automated testing. Multiple functions can mitigate misconfigurations but they would still share the same code artifact. You can further minimize risks with multiple environments in your CI/CD pipeline.
 
 #### Micro function
 
@@ -1317,20 +1321,20 @@ _**Downsides**_
 
 A micro function means that your final code artifact will be different to each function deployed. This is generally the approach to start if you're looking for fine-grain control and/or high load on certain parts of your service.
 
-_**Benefits**_
+**Benefits**
 
 * **Granular scaling**. A micro function can benefit from the [Lambda scaling model](https://docs.aws.amazon.com/lambda/latest/dg/invocation-scaling.html){target="_blank"} to scale differently depending on each part of your application. Concurrency controls and provisioned concurrency can also be used at a granular level for capacity management.
 * **Discoverability**. Micro functions are easier to visualize when using distributed tracing. Their high-level architectures can be self-explanatory, and complexity is highly visible — assuming each function is named to the business purpose it serves.
* **Package size**. An independent function can be significantly smaller (KB vs MB) depending on the external dependencies it requires to perform its purpose. Conversely, a monolithic approach can benefit from [Lambda Layers](https://docs.aws.amazon.com/lambda/latest/dg/invocation-layers.html){target="_blank"} to optimize builds for external dependencies.

-_**Downsides**_
+**Downsides**

-* **Upfront investment**. Python ecosystem doesn't use a bundler — you need a custom build tooling to ensure each function only has what it needs and account for [C bindings for runtime compatibility](https://docs.aws.amazon.com/lambda/latest/dg/lambda-runtimes.html){target="_blank"}. Operations become more elaborate — you need to standardize tracing labels/annotations, structured logging, and metrics to pinpoint root causes.
-    - Engineering discipline is necessary for both approaches. Micro-function approach however requires further attention in consistency as the number of functions grow, just like any distributed system.
+* **Upfront investment**. You need custom build tooling to bundle assets, including [C bindings for runtime compatibility](https://docs.aws.amazon.com/lambda/latest/dg/lambda-runtimes.html){target="_blank"}. Operations become more elaborate — you need to standardize tracing labels/annotations, structured logging, and metrics to pinpoint root causes.
+    * Engineering discipline is necessary for both approaches. The micro-function approach, however, requires further attention to consistency as the number of functions grows, just like any distributed system.
* **Harder to share code**. Shared code must be carefully evaluated to avoid unnecessary deployments when it changes. Equally, if shared code isn't a library, your development, build, and deployment tooling needs to accommodate the distinct layout.
* **Slower safe deployments**. Safely deploying multiple functions requires coordination — AWS CodeDeploy deploys and verifies each function sequentially. This increases lead time substantially (minutes to hours) depending on the deployment strategy you choose. You can mitigate it by selectively enabling it in prod-like environments only, and where the risk profile is applicable.
-    - Automated testing, operational and security reviews are essential to stability in either approaches.
+    * Automated testing, operational and security reviews are essential to stability in either approach.

## Testing your code

diff --git a/docs/core/event_handler/appsync.md b/docs/core/event_handler/appsync.md index 95457aa7736..f3203e37834 100644 --- a/docs/core/event_handler/appsync.md +++ b/docs/core/event_handler/appsync.md @@ -770,7 +770,6 @@ Let's assume you have `app.py` as your Lambda function entrypoint and routes in app.resolve(event, context) ``` - ## Testing your code You can test your resolvers by passing a mocked or actual AppSync Lambda event that you're expecting. diff --git a/docs/core/logger.md b/docs/core/logger.md index 0edc4aa3ba7..23d57e251b9 100644 --- a/docs/core/logger.md +++ b/docs/core/logger.md @@ -16,50 +16,30 @@ Logger provides an opinionated logger with output structured as JSON.
Logger requires two settings: -Setting | Description | Environment variable | Constructor parameter -------------------------------------------------- | ------------------------------------------------- | ------------------------------------------------- | ------------------------------------------------- -**Logging level** | Sets how verbose Logger should be (INFO, by default) | `LOG_LEVEL` | `level` -**Service** | Sets **service** key that will be present across all log statements | `POWERTOOLS_SERVICE_NAME` | `service` - -???+ example - **AWS Serverless Application Model (SAM)** - -=== "template.yaml" - - ```yaml hl_lines="9 10" - Resources: - HelloWorldFunction: - Type: AWS::Serverless::Function - Properties: - Runtime: python3.8 - Environment: - Variables: - LOG_LEVEL: INFO - POWERTOOLS_SERVICE_NAME: example - ``` -=== "app.py" +| Setting | Description | Environment variable | Constructor parameter | +| ----------------- | ------------------------------------------------------------------- | ------------------------- | --------------------- | +| **Logging level** | Sets how verbose Logger should be (INFO, by default) | `LOG_LEVEL` | `level` | +| **Service** | Sets **service** key that will be present across all log statements | `POWERTOOLS_SERVICE_NAME` | `service` | - ```python hl_lines="2 4" - from aws_lambda_powertools import Logger - logger = Logger() # Sets service via env var - # OR logger = Logger(service="example") - ``` +```yaml hl_lines="12-13" title="AWS Serverless Application Model (SAM) example" +--8<-- "examples/logger/sam/template.yaml" +``` ### Standard structured keys Your Logger will include the following keys to your structured logging: -Key | Example | Note -------------------------------------------------- | ------------------------------------------------- | --------------------------------------------------------------------------------- -**level**: `str` | `INFO` | Logging level -**location**: `str` | `collect.handler:1` | Source code location where statement was executed -**message**: `Any` | `Collecting payment` | Unserializable JSON values are casted as `str` -**timestamp**: `str` | `2021-05-03 10:20:19,650+0200` | Timestamp with milliseconds, by default uses local timezone -**service**: `str` | `payment` | Service name defined, by default `service_undefined` -**xray_trace_id**: `str` | `1-5759e988-bd862e3fe1be46a994272793` | When [tracing is enabled](https://docs.aws.amazon.com/lambda/latest/dg/services-xray.html){target="_blank"}, it shows X-Ray Trace ID -**sampling_rate**: `float` | `0.1` | When enabled, it shows sampling rate in percentage e.g. 
10%
-**exception_name**: `str` | `ValueError` | When `logger.exception` is used and there is an exception
-**exception**: `str` | `Traceback (most recent call last)..` | When `logger.exception` is used and there is an exception
+| Key | Example | Note |
+| --- | --- | --- |
+| **level**: `str` | `INFO` | Logging level |
+| **location**: `str` | `collect.handler:1` | Source code location where statement was executed |
+| **message**: `Any` | `Collecting payment` | Unserializable JSON values are cast as `str` |
+| **timestamp**: `str` | `2021-05-03 10:20:19,650+0200` | Timestamp with milliseconds, by default uses local timezone |
+| **service**: `str` | `payment` | Service name defined, by default `service_undefined` |
+| **xray_trace_id**: `str` | `1-5759e988-bd862e3fe1be46a994272793` | When [tracing is enabled](https://docs.aws.amazon.com/lambda/latest/dg/services-xray.html){target="_blank"}, it shows X-Ray Trace ID |
+| **sampling_rate**: `float` | `0.1` | When enabled, it shows sampling rate in percentage e.g. 10% |
+| **exception_name**: `str` | `ValueError` | When `logger.exception` is used and there is an exception |
+| **exception**: `str` | `Traceback (most recent call last)..` | When `logger.exception` is used and there is an exception |

### Capturing Lambda context info

@@ -67,83 +47,38 @@ You can enrich your structured logs with key Lambda context information via `inj

=== "collect.py"

-    ```python hl_lines="5"
-    from aws_lambda_powertools import Logger
-
-    logger = Logger(service="payment")
-
-    @logger.inject_lambda_context
-    def handler(event, context):
-        logger.info("Collecting payment")
-
-        # You can log entire objects too
-        logger.info({
-            "operation": "collect_payment",
-            "charge_id": event['charge_id']
-        })
-        ...
+    ```python hl_lines="7"
+    --8<-- "examples/logger/src/inject_lambda_context.py"
    ```

=== "Example CloudWatch Logs excerpt"

-    ```json hl_lines="7-11 16-19"
-    {
-        "level": "INFO",
-        "location": "collect.handler:7",
-        "message": "Collecting payment",
-        "timestamp": "2021-05-03 11:47:12,494+0200",
-        "service": "payment",
-        "cold_start": true,
-        "lambda_function_name": "test",
-        "lambda_function_memory_size": 128,
-        "lambda_function_arn": "arn:aws:lambda:eu-west-1:12345678910:function:test",
-        "lambda_request_id": "52fdfc07-2182-154f-163f-5f0f9a621d72"
-    },
-    {
-        "level": "INFO",
-        "location": "collect.handler:10",
-        "message": {
-            "operation": "collect_payment",
-            "charge_id": "ch_AZFlk2345C0"
-        },
-        "timestamp": "2021-05-03 11:47:12,494+0200",
-        "service": "payment",
-        "cold_start": true,
-        "lambda_function_name": "test",
-        "lambda_function_memory_size": 128,
-        "lambda_function_arn": "arn:aws:lambda:eu-west-1:12345678910:function:test",
-        "lambda_request_id": "52fdfc07-2182-154f-163f-5f0f9a621d72"
-    }
+    ```json hl_lines="8-12 17-20"
+    --8<-- "examples/logger/src/inject_lambda_context_output.json"
    ```

When used, this will include the following keys:

-Key | Example
----- | ----
-**cold_start**: `bool` | `false`
-**function_name** `str` | `example-powertools-HelloWorldFunction-1P1Z6B39FLU73`
-**function_memory_size**: `int` | `128`
-**function_arn**: `str` | `arn:aws:lambda:eu-west-1:012345678910:function:example-powertools-HelloWorldFunction-1P1Z6B39FLU73`
-**function_request_id**: `str` | `899856cb-83d1-40d7-8611-9e78f15f32f4`
+| Key | Example |
+| --- | --- |
+| **cold_start**: `bool` | `false` |
+| **function_name**: `str` | `example-powertools-HelloWorldFunction-1P1Z6B39FLU73` |
+| **function_memory_size**: `int` | `128` |
+| **function_arn**: `str` | `arn:aws:lambda:eu-west-1:012345678910:function:example-powertools-HelloWorldFunction-1P1Z6B39FLU73` |
+| **function_request_id**: `str` | `899856cb-83d1-40d7-8611-9e78f15f32f4` |

-#### Logging incoming event
+### Logging incoming event

When debugging in non-production environments, you can instruct Logger to log the incoming event with `log_event` param or via `POWERTOOLS_LOGGER_LOG_EVENT` env var.

???+ warning
    This is disabled by default to prevent sensitive info being logged

-```python hl_lines="5" title="Logging incoming event"
-from aws_lambda_powertools import Logger
-
-logger = Logger(service="payment")
-
-@logger.inject_lambda_context(log_event=True)
-def handler(event, context):
-    ...
+```python hl_lines="7" title="Logging incoming event"
+--8<-- "examples/logger/src/log_incoming_event.py"
```

-#### Setting a Correlation ID
+### Setting a Correlation ID

You can set a Correlation ID using `correlation_id_path` param by passing a [JMESPath expression](https://jmespath.org/tutorial.html){target="_blank"}.
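Since the tabbed example below is pulled in from an external snippet file, here is a minimal sketch of what such a handler could look like; the `headers.my_request_id_header` expression mirrors the event shape used in this section and is illustrative only:

```python
from aws_lambda_powertools import Logger

logger = Logger(service="payment")

# "headers.my_request_id_header" is a JMESPath expression pointing at
# wherever the correlation ID lives in your incoming event
@logger.inject_lambda_context(correlation_id_path="headers.my_request_id_header")
def handler(event, context):
    logger.debug(f"Correlation ID => {logger.get_correlation_id()}")
    logger.info("Collecting payment")
```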
@@ -152,87 +87,63 @@ You can set a Correlation ID using [JME

=== "collect.py"

-    ```python hl_lines="5"
-    from aws_lambda_powertools import Logger
-
-    logger = Logger(service="payment")
-
-    @logger.inject_lambda_context(correlation_id_path="headers.my_request_id_header")
-    def handler(event, context):
-        logger.debug(f"Correlation ID => {logger.get_correlation_id()}")
-        logger.info("Collecting payment")
+    ```python hl_lines="7"
+    --8<-- "examples/logger/src/set_correlation_id.py"
    ```

=== "Example Event"

    ```json hl_lines="3"
-    {
-        "headers": {
-            "my_request_id_header": "correlation_id_value"
-        }
-    }
+    --8<-- "examples/logger/src/set_correlation_id_event.json"
    ```

=== "Example CloudWatch Logs excerpt"

    ```json hl_lines="12"
-    {
-        "level": "INFO",
-        "location": "collect.handler:7",
-        "message": "Collecting payment",
-        "timestamp": "2021-05-03 11:47:12,494+0200",
-        "service": "payment",
-        "cold_start": true,
-        "lambda_function_name": "test",
-        "lambda_function_memory_size": 128,
-        "lambda_function_arn": "arn:aws:lambda:eu-west-1:12345678910:function:test",
-        "lambda_request_id": "52fdfc07-2182-154f-163f-5f0f9a621d72",
-        "correlation_id": "correlation_id_value"
-    }
+    --8<-- "examples/logger/src/set_correlation_id_output.json"
    ```

-We provide [built-in JMESPath expressions](#built-in-correlation-id-expressions) for known event sources, where either a request ID or X-Ray Trace ID are present.
+#### set_correlation_id method
+
+You can also use the `set_correlation_id` method to inject it anywhere else in your code. The example below uses the [Event Source Data Classes utility](../utilities/data_classes.md) to easily access event properties.

=== "collect.py"

-    ```python hl_lines="2 6"
-    from aws_lambda_powertools import Logger
-    from aws_lambda_powertools.logging import correlation_paths
+    ```python hl_lines="11"
+    --8<-- "examples/logger/src/set_correlation_id_method.py"
+    ```
+=== "Example Event"

-    logger = Logger(service="payment")
+    ```json hl_lines="3"
+    --8<-- "examples/logger/src/set_correlation_id_method_event.json"
+    ```

-    @logger.inject_lambda_context(correlation_id_path=correlation_paths.API_GATEWAY_REST)
-    def handler(event, context):
-        logger.debug(f"Correlation ID => {logger.get_correlation_id()}")
-        logger.info("Collecting payment")
+=== "Example CloudWatch Logs excerpt"
+
+    ```json hl_lines="7"
+    --8<-- "examples/logger/src/set_correlation_id_method_output.json"
+    ```
+
+#### Known correlation IDs
+
+To ease routine tasks like extracting correlation ID from popular event sources, we provide [built-in JMESPath expressions](#built-in-correlation-id-expressions).
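As a hedged sketch of what the included snippet likely demonstrates, a built-in expression can be passed straight to `correlation_id_path`:

```python
from aws_lambda_powertools import Logger
from aws_lambda_powertools.logging import correlation_paths

logger = Logger(service="payment")

# correlation_paths bundles JMESPath expressions for common event sources,
# e.g. API Gateway REST APIs ("requestContext.requestId")
@logger.inject_lambda_context(correlation_id_path=correlation_paths.API_GATEWAY_REST)
def handler(event, context):
    logger.debug(f"Correlation ID => {logger.get_correlation_id()}")
    logger.info("Collecting payment")
```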
+ +=== "collect.py" + + ```python hl_lines="2 8" + --8<-- "examples/logger/src/set_correlation_id_jmespath.py" ``` === "Example Event" ```json hl_lines="3" - { - "requestContext": { - "requestId": "correlation_id_value" - } - } + --8<-- "examples/logger/src/set_correlation_id_jmespath_event.json" ``` === "Example CloudWatch Logs excerpt" ```json hl_lines="12" - { - "level": "INFO", - "location": "collect.handler:8", - "message": "Collecting payment", - "timestamp": "2021-05-03 11:47:12,494+0200", - "service": "payment", - "cold_start": true, - "lambda_function_name": "test", - "lambda_function_memory_size": 128, - "lambda_function_arn": "arn:aws:lambda:eu-west-1:12345678910:function:test", - "lambda_request_id": "52fdfc07-2182-154f-163f-5f0f9a621d72", - "correlation_id": "correlation_id_value" - } + --8<-- "examples/logger/src/set_correlation_id_jmespath_output.json" ``` ### Appending additional keys @@ -254,30 +165,13 @@ You can append your own keys to your existing Logger via `append_keys(**addition === "collect.py" - ```python hl_lines="9" - from aws_lambda_powertools import Logger - - logger = Logger(service="payment") - - def handler(event, context): - order_id = event.get("order_id") - - # this will ensure order_id key always has the latest value before logging - logger.append_keys(order_id=order_id) - - logger.info("Collecting payment") + ```python hl_lines="12" + --8<-- "examples/logger/src/append_keys.py" ``` === "Example CloudWatch Logs excerpt" ```json hl_lines="7" - { - "level": "INFO", - "location": "collect.handler:11", - "message": "Collecting payment", - "timestamp": "2021-05-03 11:47:12,494+0200", - "service": "payment", - "order_id": "order_id_value" - } + --8<-- "examples/logger/src/append_keys_output.json" ``` ???+ tip "Tip: Logger will automatically reject any key with a None value" @@ -296,103 +190,13 @@ It accepts any dictionary, and all keyword arguments will be added as part of th === "extra_parameter.py" - ```python hl_lines="6" - from aws_lambda_powertools import Logger - - logger = Logger(service="payment") - - fields = { "request_id": "1123" } - logger.info("Collecting payment", extra=fields) - ``` -=== "Example CloudWatch Logs excerpt" - - ```json hl_lines="7" - { - "level": "INFO", - "location": "collect.handler:6", - "message": "Collecting payment", - "timestamp": "2021-05-03 11:47:12,494+0200", - "service": "payment", - "request_id": "1123" - } - ``` - -#### set_correlation_id method - -You can set a correlation_id to your existing Logger via `set_correlation_id(value)` method by passing any string value. 
- -=== "collect.py" - - ```python hl_lines="6" - from aws_lambda_powertools import Logger - - logger = Logger(service="payment") - - def handler(event, context): - logger.set_correlation_id(event["requestContext"]["requestId"]) - logger.info("Collecting payment") - ``` - -=== "Example Event" - - ```json hl_lines="3" - { - "requestContext": { - "requestId": "correlation_id_value" - } - } - ``` - -=== "Example CloudWatch Logs excerpt" - - ```json hl_lines="7" - { - "level": "INFO", - "location": "collect.handler:7", - "message": "Collecting payment", - "timestamp": "2021-05-03 11:47:12,494+0200", - "service": "payment", - "correlation_id": "correlation_id_value" - } - ``` - -Alternatively, you can combine [Data Classes utility](../utilities/data_classes.md) with Logger to use dot notation object: - -=== "collect.py" - - ```python hl_lines="2 7-8" - from aws_lambda_powertools import Logger - from aws_lambda_powertools.utilities.data_classes import APIGatewayProxyEvent - - logger = Logger(service="payment") - - def handler(event, context): - event = APIGatewayProxyEvent(event) - logger.set_correlation_id(event.request_context.request_id) - logger.info("Collecting payment") - ``` -=== "Example Event" - - ```json hl_lines="3" - { - "requestContext": { - "requestId": "correlation_id_value" - } - } + ```python hl_lines="9" + --8<-- "examples/logger/src/append_keys_extra.py" ``` - === "Example CloudWatch Logs excerpt" ```json hl_lines="7" - { - "timestamp": "2020-05-24 18:17:33,774", - "level": "INFO", - "location": "collect.handler:9", - "service": "payment", - "sampling_rate": 0.0, - "correlation_id": "correlation_id_value", - "message": "Collecting payment" - } + --8<-- "examples/logger/src/append_keys_extra_output.json" ``` ### Removing additional keys @@ -401,37 +205,14 @@ You can remove any additional key from Logger state using `remove_keys`. === "collect.py" - ```python hl_lines="9" - from aws_lambda_powertools import Logger - - logger = Logger(service="payment") - - def handler(event, context): - logger.append_keys(sample_key="value") - logger.info("Collecting payment") - - logger.remove_keys(["sample_key"]) - logger.info("Collecting payment without sample key") + ```python hl_lines="11" + --8<-- "examples/logger/src/remove_keys.py" ``` === "Example CloudWatch Logs excerpt" ```json hl_lines="7" - { - "level": "INFO", - "location": "collect.handler:7", - "message": "Collecting payment", - "timestamp": "2021-05-03 11:47:12,494+0200", - "service": "payment", - "sample_key": "value" - }, - { - "level": "INFO", - "location": "collect.handler:10", - "message": "Collecting payment without sample key", - "timestamp": "2021-05-03 11:47:12,494+0200", - "service": "payment" - } + --8<-- "examples/logger/src/remove_keys_output.json" ``` #### Clearing all state @@ -450,54 +231,20 @@ Logger is commonly initialized in the global scope. 
Due to [Lambda Execution Con === "collect.py" - ```python hl_lines="5 8" - from aws_lambda_powertools import Logger - - logger = Logger(service="payment") - - @logger.inject_lambda_context(clear_state=True) - def handler(event, context): - if event.get("special_key"): - # Should only be available in the first request log - # as the second request doesn't contain `special_key` - logger.append_keys(debugging_key="value") - - logger.info("Collecting payment") + ```python hl_lines="7 10" + --8<-- "examples/logger/src/clear_state.py" ``` === "#1 request" ```json hl_lines="7" - { - "level": "INFO", - "location": "collect.handler:10", - "message": "Collecting payment", - "timestamp": "2021-05-03 11:47:12,494+0200", - "service": "payment", - "special_key": "debug_key", - "cold_start": true, - "lambda_function_name": "test", - "lambda_function_memory_size": 128, - "lambda_function_arn": "arn:aws:lambda:eu-west-1:12345678910:function:test", - "lambda_request_id": "52fdfc07-2182-154f-163f-5f0f9a621d72" - } + --8<-- "examples/logger/src/clear_state_event_one.json" ``` === "#2 request" ```json hl_lines="7" - { - "level": "INFO", - "location": "collect.handler:10", - "message": "Collecting payment", - "timestamp": "2021-05-03 11:47:12,494+0200", - "service": "payment", - "cold_start": false, - "lambda_function_name": "test", - "lambda_function_memory_size": 128, - "lambda_function_arn": "arn:aws:lambda:eu-west-1:12345678910:function:test", - "lambda_request_id": "52fdfc07-2182-154f-163f-5f0f9a621d72" - } + --8<-- "examples/logger/src/clear_state_event_two.json" ``` ### Logging exceptions @@ -509,29 +256,14 @@ Use `logger.exception` method to log contextual information about exceptions. Lo === "collect.py" - ```python hl_lines="8" - from aws_lambda_powertools import Logger - - logger = Logger(service="payment") - - try: - raise ValueError("something went wrong") - except Exception: - logger.exception("Received an exception") + ```python hl_lines="15" + --8<-- "examples/logger/src/logging_exceptions.py" ``` === "Example CloudWatch Logs excerpt" ```json hl_lines="7-8" - { - "level": "ERROR", - "location": "collect.handler:5", - "message": "Received an exception", - "timestamp": "2021-05-03 11:47:12,494+0200", - "service": "payment", - "exception_name": "ValueError", - "exception": "Traceback (most recent call last):\n File \"\", line 2, in \nValueError: something went wrong" - } + --8<-- "examples/logger/src/logging_exceptions_output.json" ``` ## Advanced @@ -543,52 +275,54 @@ You can use any of the following built-in JMESPath expressions as part of [injec ???+ note "Note: Any object key named with `-` must be escaped" For example, **`request.headers."x-amzn-trace-id"`**. 
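For instance, a minimal sketch of passing such an escaped key to `correlation_id_path`, matching the AppSync expression in the table below:

```python
from aws_lambda_powertools import Logger

logger = Logger(service="payment")

# the dashed key is double-quoted inside the JMESPath expression,
# hence the single quotes around the whole string
@logger.inject_lambda_context(correlation_id_path='request.headers."x-amzn-trace-id"')
def handler(event, context):
    logger.info("Collecting payment")
```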
-Name | Expression | Description
---- | --- | ---
-**API_GATEWAY_REST** | `"requestContext.requestId"` | API Gateway REST API request ID
-**API_GATEWAY_HTTP** | `"requestContext.requestId"` | API Gateway HTTP API request ID
-**APPSYNC_RESOLVER** | `'request.headers."x-amzn-trace-id"'` | AppSync X-Ray Trace ID
-**APPLICATION_LOAD_BALANCER** | `'headers."x-amzn-trace-id"'` | ALB X-Ray Trace ID
-**EVENT_BRIDGE** | `"id"` | EventBridge Event ID
+| Name | Expression | Description |
+| --- | --- | --- |
+| **API_GATEWAY_REST** | `"requestContext.requestId"` | API Gateway REST API request ID |
+| **API_GATEWAY_HTTP** | `"requestContext.requestId"` | API Gateway HTTP API request ID |
+| **APPSYNC_RESOLVER** | `'request.headers."x-amzn-trace-id"'` | AppSync X-Ray Trace ID |
+| **APPLICATION_LOAD_BALANCER** | `'headers."x-amzn-trace-id"'` | ALB X-Ray Trace ID |
+| **EVENT_BRIDGE** | `"id"` | EventBridge Event ID |

### Reusing Logger across your code

-Logger supports inheritance via `child` parameter. This allows you to create multiple Loggers across your code base, and propagate changes such as new keys to all Loggers.
+Similar to [Tracer](./tracer.md#reusing-tracer-across-your-code), a new instance that uses the same `service` name - env var or explicit parameter - will reuse a previous Logger instance. Just like `logging.getLogger("logger_name")` would in the standard library if called with the same logger name.
+
+Notice in the CloudWatch Logs output how `payment_id` appeared as expected when logging in `collect.py`.

=== "collect.py"

-    ```python hl_lines="1 7"
-    import shared # Creates a child logger named "payment.shared"
-    from aws_lambda_powertools import Logger
+    ```python hl_lines="1 9 11 12"
+    --8<-- "examples/logger/src/logger_reuse.py"
+    ```

-    logger = Logger() # POWERTOOLS_SERVICE_NAME: "payment"
+=== "payment.py"

-    def handler(event, context):
-        shared.inject_payment_id(event)
-        ...
+    ```python hl_lines="3 7"
+    --8<-- "examples/logger/src/logger_reuse_payment.py"
    ```

-=== "shared.py"
=== "Example CloudWatch Logs excerpt"

-    ```python hl_lines="6"
-    from aws_lambda_powertools import Logger
+    ```json hl_lines="12"
+    --8<-- "examples/logger/src/logger_reuse_output.json"
    ```

-    logger = Logger(child=True) # POWERTOOLS_SERVICE_NAME: "payment"
+???+ note "Note: About Child Loggers"
+    Coming from the standard library, you might be used to using `logging.getLogger(__name__)`. This will create a new instance of a Logger with a different name.

-    def inject_payment_id(event):
-        logger.structure_logs(append=True, payment_id=event.get("payment_id"))
-    ```
+    In Powertools, you can have the same effect by using the `child=True` parameter: `Logger(child=True)`. This creates a new Logger instance named after `service.filename`. All state changes will be propagated bi-directionally between Child and Parent.

-In this example, `Logger` will create a parent logger named `payment` and a child logger named `payment.shared`. Changes in either parent or child logger will be propagated bi-directionally.

-???+ info "Info: Child loggers will be named after the following convention `{service}.{filename}`"
-    If you forget to use `child` param but the `service` name is the same of the parent, we will return the existing parent `Logger` instead.
+    For that reason, there could be side effects depending on the order in which the Child Logger is instantiated, because Child Loggers don't have a handler.
-???+ info "Info: Child loggers will be named after the following convention `{service}.{filename}`" - If you forget to use `child` param but the `service` name is the same of the parent, we will return the existing parent `Logger` instead. + For example, if you instantiated a Child Logger and immediately used `logger.append_keys/remove_keys/set_correlation_id` to update logging state, this might fail if the Parent Logger wasn't instantiated. + + In this scenario, you can either ensure any calls manipulating state are only called when a Parent Logger is instantiated (example above), or refrain from using `child=True` parameter altogether. ### Sampling debug logs Use sampling when you want to dynamically change your log level to **DEBUG** based on a **percentage of your concurrent/cold start invocations**. -You can use values ranging from `0.0` to `1` (100%) when setting `POWERTOOLS_LOGGER_SAMPLE_RATE` env var or `sample_rate` parameter in Logger. +You can use values ranging from `0.0` to `1` (100%) when setting `POWERTOOLS_LOGGER_SAMPLE_RATE` env var, or `sample_rate` parameter in Logger. ???+ tip "Tip: When is this useful?" Let's imagine a sudden spike increase in concurrency triggered a transient issue downstream. When looking into the logs you might not have enough information, and while you can adjust log levels it might not happen again. @@ -602,46 +336,14 @@ Sampling decision happens at the Logger initialization. This means sampling may === "collect.py" - ```python hl_lines="4 7" - from aws_lambda_powertools import Logger - - # Sample 10% of debug logs e.g. 0.1 - logger = Logger(service="payment", sample_rate=0.1) - - def handler(event, context): - logger.debug("Verifying whether order_id is present") - logger.info("Collecting payment") + ```python hl_lines="6 10" + --8<-- "examples/logger/src/logger_reuse.py" ``` === "Example CloudWatch Logs excerpt" - ```json hl_lines="2 4 12 15 25" - { - "level": "DEBUG", - "location": "collect.handler:7", - "message": "Verifying whether order_id is present", - "timestamp": "2021-05-03 11:47:12,494+0200", - "service": "payment", - "cold_start": true, - "lambda_function_name": "test", - "lambda_function_memory_size": 128, - "lambda_function_arn": "arn:aws:lambda:eu-west-1:12345678910:function:test", - "lambda_request_id": "52fdfc07-2182-154f-163f-5f0f9a621d72", - "sampling_rate": 0.1 - }, - { - "level": "INFO", - "location": "collect.handler:7", - "message": "Collecting payment", - "timestamp": "2021-05-03 11:47:12,494+0200", - "service": "payment", - "cold_start": true, - "lambda_function_name": "test", - "lambda_function_memory_size": 128, - "lambda_function_arn": "arn:aws:lambda:eu-west-1:12345678910:function:test", - "lambda_request_id": "52fdfc07-2182-154f-163f-5f0f9a621d72", - "sampling_rate": 0.1 - } + ```json hl_lines="3 5 13 16 25" + --8<-- "examples/logger/src/sampling_debug_logs_output.json" ``` ### LambdaPowertoolsFormatter @@ -650,23 +352,19 @@ Logger propagates a few formatting configurations to the built-in `LambdaPowerto If you prefer configuring it separately, or you'd want to bring this JSON Formatter to another application, these are the supported settings: -Parameter | Description | Default -------------------------------------------------- | ------------------------------------------------- | ------------------------------------------------- -**`json_serializer`** | function to serialize `obj` to a JSON formatted `str` | `json.dumps` -**`json_deserializer`** | function to deserialize `str`, `bytes`, `bytearray` containing a 
JSON document to a Python obj | `json.loads` -**`json_default`** | function to coerce unserializable values, when no custom serializer/deserializer is set | `str` -**`datefmt`** | string directives (strftime) to format log timestamp | `%Y-%m-%d %H:%M:%S,%F%z`, where `%F` is a custom ms directive -**`use_datetime_directive`** | format the `datefmt` timestamps using `datetime`, not `time` (also supports the custom `%F` directive for milliseconds) | `False` -**`utc`** | set logging timestamp to UTC | `False` -**`log_record_order`** | set order of log keys when logging | `["level", "location", "message", "timestamp"]` -**`kwargs`** | key-value to be included in log messages | `None` - -```python hl_lines="2 4-5" title="Pre-configuring Lambda Powertools Formatter" -from aws_lambda_powertools import Logger -from aws_lambda_powertools.logging.formatter import LambdaPowertoolsFormatter - -formatter = LambdaPowertoolsFormatter(utc=True, log_record_order=["message"]) -logger = Logger(service="example", logger_formatter=formatter) +| Parameter | Description | Default | +| ---------------------------- | ------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------- | +| **`json_serializer`** | function to serialize `obj` to a JSON formatted `str` | `json.dumps` | +| **`json_deserializer`** | function to deserialize `str`, `bytes`, `bytearray` containing a JSON document to a Python obj | `json.loads` | +| **`json_default`** | function to coerce unserializable values, when no custom serializer/deserializer is set | `str` | +| **`datefmt`** | string directives (strftime) to format log timestamp | `%Y-%m-%d %H:%M:%S,%F%z`, where `%F` is a custom ms directive | +| **`use_datetime_directive`** | format the `datefmt` timestamps using `datetime`, not `time` (also supports the custom `%F` directive for milliseconds) | `False` | +| **`utc`** | set logging timestamp to UTC | `False` | +| **`log_record_order`** | set order of log keys when logging | `["level", "location", "message", "timestamp"]` | +| **`kwargs`** | key-value to be included in log messages | `None` | + +```python hl_lines="2 7-8" title="Pre-configuring Lambda Powertools Formatter" +--8<-- "examples/logger/src/powertools_formatter_setup.py" ``` ### Migrating from other Loggers @@ -681,6 +379,8 @@ For Logger, the `service` is the logging key customers can use to search log ope #### Inheriting Loggers +??? tip "Tip: Prefer [Logger Reuse feature](#reusing-logger-across-your-code) over inheritance unless strictly necessary, [see caveats.](#reusing-logger-across-your-code)" + > Python Logging hierarchy happens via the dot notation: `service`, `service.child`, `service.child_2` For inheritance, Logger uses a `child=True` parameter along with `service` being the same value across Loggers. @@ -692,38 +392,34 @@ For child Loggers, we introspect the name of your module where `Logger(child=Tru === "incorrect_logger_inheritance.py" - ```python hl_lines="4 10" - import my_module - from aws_lambda_powertools import Logger - - logger = Logger(service="payment") - ... 
+    ```python hl_lines="1 9"
+    --8<-- "examples/logger/src/logging_inheritance_bad.py"
+    ```

-    # my_module.py
-    from aws_lambda_powertools import Logger
+=== "my_other_module.py"

-    logger = Logger(child=True)
+    ```python hl_lines="1 9"
+    --8<-- "examples/logger/src/logging_inheritance_module.py"
    ```

-=== "correct_logger_inheritance.py"
+In this case, Logger will register a Logger named `payment`, and a Logger named `service_undefined`. The latter isn't inheriting from the parent, and will have no handler, resulting in no message being logged to standard output.

-    ```python hl_lines="4 10"
-    import my_module
-    from aws_lambda_powertools import Logger
+???+ tip
+    This can be fixed by either ensuring both have the `service` value as `payment`, or simply using the environment variable `POWERTOOLS_SERVICE_NAME` to ensure the service value is the same across all Loggers when not explicitly set.

-    logger = Logger(service="payment")
-    ...
+Do this instead:

-    # my_module.py
-    from aws_lambda_powertools import Logger
+=== "correct_logger_inheritance.py"

-    logger = Logger(service="payment", child=True)
+    ```python hl_lines="1 9"
+    --8<-- "examples/logger/src/logging_inheritance_good.py"
    ```

-In this case, Logger will register a Logger named `payment`, and a Logger named `service_undefined`. The latter isn't inheriting from the parent, and will have no handler, resulting in no message being logged to standard output.
+=== "my_other_module.py"

-???+ tip
-    This can be fixed by either ensuring both has the `service` value as `payment`, or simply use the environment variable `POWERTOOLS_SERVICE_NAME` to ensure service value will be the same across all Loggers when not explicitly set.
+    ```python hl_lines="1 9"
+    --8<-- "examples/logger/src/logging_inheritance_module.py"
+    ```

#### Overriding Log records

@@ -736,126 +432,71 @@ You might want to continue to use the same date formatting style, or override `l

Logger allows you to either change the format or suppress the following keys altogether at the initialization: `location`, `timestamp`, `level`, `xray_trace_id`.

-
=== "lambda_handler.py"

-    ```python hl_lines="7 10"
-    from aws_lambda_powertools import Logger
-
-    date_format = "%m/%d/%Y %I:%M:%S %p"
-    location_format = "[%(funcName)s] %(module)s"
-    # override location and timestamp format
-    logger = Logger(service="payment", location=location_format, datefmt=date_format)
-
-    # suppress the location key with a None value
-    logger_two = Logger(service="payment", location=None)
-
-    logger.info("Collecting payment")
+    ```python hl_lines="7 10"
+    --8<-- "examples/logger/src/overriding_log_records.py"
    ```
+
=== "Example CloudWatch Logs excerpt"

    ```json hl_lines="3 5"
-    {
-        "level": "INFO",
-        "location": "[] lambda_handler",
-        "message": "Collecting payment",
-        "timestamp": "02/09/2021 09:25:17 AM",
-        "service": "payment"
-    }
+    --8<-- "examples/logger/src/overriding_log_records_output.json"
    ```

#### Reordering log keys position

You can change the order of [standard Logger keys](#standard-structured-keys) or any keys that will be appended later at runtime via the `log_record_order` parameter.
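As a reference for the external snippet below, a minimal sketch could look like this:

```python
from aws_lambda_powertools import Logger

# make "message" the first key in every log record
logger = Logger(service="payment", log_record_order=["message"])

# a key added later at runtime, e.g. "request_id", can be promoted the same way:
# Logger(service="payment", log_record_order=["request_id"])

# default order when omitted:
# Logger(service="payment", log_record_order=["level", "location", "message", "timestamp"])

logger.info("hello world")
```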
-=== "lambda_handler.py" - - ```python hl_lines="4 7" - from aws_lambda_powertools import Logger - - # make message as the first key - logger = Logger(service="payment", log_record_order=["message"]) - - # make request_id that will be added later as the first key - # Logger(service="payment", log_record_order=["request_id"]) +=== "app.py" - # Default key sorting order when omit - # Logger(service="payment", log_record_order=["level","location","message","timestamp"]) + ```python hl_lines="5 8" + --8<-- "examples/logger/src/reordering_log_keys.py" ``` === "Example CloudWatch Logs excerpt" - ```json hl_lines="3 5" - { - "message": "hello world", - "level": "INFO", - "location": "[]:6", - "timestamp": "2021-02-09 09:36:12,280", - "service": "service_undefined", - "sampling_rate": 0.0 - } + ```json hl_lines="3 10" + --8<-- "examples/logger/src/reordering_log_keys_output.json" ``` #### Setting timestamp to UTC -By default, this Logger and standard logging library emits records using local time timestamp. You can override this behaviour via `utc` parameter: +By default, this Logger and standard logging library emits records using local time timestamp. You can override this behavior via `utc` parameter: -```python hl_lines="6" title="Setting UTC timestamp by default" -from aws_lambda_powertools import Logger - -logger = Logger(service="payment") -logger.info("Local time") - -logger_in_utc = Logger(service="payment", utc=True) -logger_in_utc.info("GMT time zone") -``` - -#### Custom function for unserializable values +=== "app.py" -By default, Logger uses `str` to handle values non-serializable by JSON. You can override this behaviour via `json_default` parameter by passing a Callable: + ```python hl_lines="6" + --8<-- "examples/logger/src/setting_utc_timestamp.py" + ``` -=== "collect.py" +=== "Example CloudWatch Logs excerpt" - ```python hl_lines="3-4 9 12" - from aws_lambda_powertools import Logger + ```json hl_lines="6 13" + --8<-- "examples/logger/src/setting_utc_timestamp_output.json" + ``` - def custom_json_default(value): - return f"" +#### Custom function for unserializable values - class Unserializable: - pass +By default, Logger uses `str` to handle values non-serializable by JSON. You can override this behavior via `json_default` parameter by passing a Callable: - logger = Logger(service="payment", json_default=custom_json_default) +=== "app.py" - def handler(event, context): - logger.info(Unserializable()) + ```python hl_lines="6 17" + --8<-- "examples/logger/src/unserializable_values.py" ``` + === "Example CloudWatch Logs excerpt" - ```json hl_lines="4" - { - "level": "INFO", - "location": "collect.handler:8", - "message": """", - "timestamp": "2021-05-03 15:17:23,632+0200", - "service": "payment" - } + ```json hl_lines="4-6" + --8<-- "examples/logger/src/unserializable_values_output.json" ``` #### Bring your own handler -By default, Logger uses StreamHandler and logs to standard output. You can override this behaviour via `logger_handler` parameter: - -```python hl_lines="3-4 9 12" title="Configure Logger to output to a file" -import logging -from pathlib import Path - -from aws_lambda_powertools import Logger +By default, Logger uses StreamHandler and logs to standard output. 
You can override this behavior via `logger_handler` parameter: -log_file = Path("/tmp/log.json") -log_file_handler = logging.FileHandler(filename=log_file) -logger = Logger(service="payment", logger_handler=log_file_handler) - -logger.info("Collecting payment") +```python hl_lines="7-8 10" title="Configure Logger to output to a file" +--8<-- "examples/logger/src/bring_your_own_handler.py" ``` #### Bring your own formatter @@ -869,30 +510,13 @@ For these, you can override the `serialize` method from [LambdaPowertoolsFormatt === "custom_formatter.py" - ```python hl_lines="6-7 12" - from aws_lambda_powertools import Logger - from aws_lambda_powertools.logging.formatter import LambdaPowertoolsFormatter - - from typing import Dict - - class CustomFormatter(LambdaPowertoolsFormatter): - def serialize(self, log: Dict) -> str: - """Serialize final structured log dict to JSON str""" - log["event"] = log.pop("message") # rename message key to event - return self.json_serializer(log) # use configured json serializer - - logger = Logger(service="example", logger_formatter=CustomFormatter()) - logger.info("hello") + ```python hl_lines="2 5-6 12" + --8<-- "examples/logger/src/bring_your_own_formatter.py" ``` === "Example CloudWatch Logs excerpt" - ```json hl_lines="5" - { - "level": "INFO", - "location": ":16", - "timestamp": "2021-12-30 13:41:53,413+0100", - "event": "hello" - } + ```json hl_lines="6" + --8<-- "examples/logger/src/bring_your_own_formatter_output.json" ``` The `log` argument is the final log record containing [our standard keys](#standard-structured-keys), optionally [Lambda context keys](#capturing-lambda-context-info), and any custom key you might have added via [append_keys](#append_keys-method) or the [extra parameter](#extra-parameter). @@ -902,86 +526,26 @@ For exceptional cases where you want to completely replace our formatter logic, ???+ warning You will need to implement `append_keys`, `clear_state`, override `format`, and optionally `remove_keys` to keep the same feature set Powertools Logger provides. This also means keeping state of logging keys added. 
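For reference, a minimal sketch of such a formatter, keeping state of appended keys (note that `json` must be imported for the serialization step below):

```python
import json
import logging
from typing import Iterable, List, Optional

from aws_lambda_powertools import Logger
from aws_lambda_powertools.logging.formatter import BasePowertoolsFormatter


class CustomFormatter(BasePowertoolsFormatter):
    def __init__(self, log_record_order: Optional[List[str]] = None, *args, **kwargs):
        self.log_record_order = log_record_order or ["level", "location", "message", "timestamp"]
        self.log_format = dict.fromkeys(self.log_record_order)
        super().__init__(*args, **kwargs)

    def append_keys(self, **additional_keys):
        # also used by the `inject_lambda_context` decorator
        self.log_format.update(additional_keys)

    def remove_keys(self, keys: Iterable[str]):
        for key in keys:
            self.log_format.pop(key, None)

    def clear_state(self):
        # reset any state added between invocations
        self.log_format = dict.fromkeys(self.log_record_order)

    def format(self, record: logging.LogRecord) -> str:  # noqa: A003
        """Format logging record as structured JSON str"""
        return json.dumps(
            {
                "event": super().format(record),
                "timestamp": self.formatTime(record),
                "my_default_key": "test",
                **self.log_format,
            }
        )


logger = Logger(service="payment", logger_formatter=CustomFormatter())
```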
- === "collect.py"

-    ```python hl_lines="5 7 9-10 13 17 21 24 35"
-    import logging
-    from typing import Iterable, List, Optional
-
-    from aws_lambda_powertools import Logger
-    from aws_lambda_powertools.logging.formatter import BasePowertoolsFormatter
-
-    class CustomFormatter(BasePowertoolsFormatter):
-        def __init__(self, log_record_order: Optional[List[str]], *args, **kwargs):
-            self.log_record_order = log_record_order or ["level", "location", "message", "timestamp"]
-            self.log_format = dict.fromkeys(self.log_record_order)
-            super().__init__(*args, **kwargs)
-
-        def append_keys(self, **additional_keys):
-            # also used by `inject_lambda_context` decorator
-            self.log_format.update(additional_keys)
-
-        def remove_keys(self, keys: Iterable[str]):
-            for key in keys:
-                self.log_format.pop(key, None)
-
-        def clear_state(self):
-            self.log_format = dict.fromkeys(self.log_record_order)
-
-        def format(self, record: logging.LogRecord) -> str:  # noqa: A003
-            """Format logging record as structured JSON str"""
-            return json.dumps(
-                {
-                    "event": super().format(record),
-                    "timestamp": self.formatTime(record),
-                    "my_default_key": "test",
-                    **self.log_format,
-                }
-            )
-
-    logger = Logger(service="payment", logger_formatter=CustomFormatter())
-
-    @logger.inject_lambda_context
-    def handler(event, context):
-        logger.info("Collecting payment")
+    ```python hl_lines="6 9 11-12 15 19 23 26 38"
+    --8<-- "examples/logger/src/bring_your_own_formatter_from_scratch.py"
    ```
+
=== "Example CloudWatch Logs excerpt"

    ```json hl_lines="2-4"
-    {
-        "event": "Collecting payment",
-        "timestamp": "2021-05-03 11:47:12,494",
-        "my_default_key": "test",
-        "cold_start": true,
-        "lambda_function_name": "test",
-        "lambda_function_memory_size": 128,
-        "lambda_function_arn": "arn:aws:lambda:eu-west-1:12345678910:function:test",
-        "lambda_request_id": "52fdfc07-2182-154f-163f-5f0f9a621d72"
-    }
+    --8<-- "examples/logger/src/bring_your_own_formatter_from_scratch_output.json"
    ```

#### Bring your own JSON serializer

By default, Logger uses `json.dumps` and `json.loads` as serializer and deserializer respectively. There could be scenarios where you are making use of alternative JSON libraries like [orjson](https://github.com/ijl/orjson){target="_blank"}.

-As parameters don't always translate well between them, you can pass any callable that receives a `Dict` and return a `str`:
+As parameters don't always translate well between them, you can pass any callable that receives a `dict` and returns a `str`:

-```python hl_lines="1 5-6 9-10" title="Using Rust orjson library as serializer"
-import orjson
-
-from aws_lambda_powertools import Logger
-
-custom_serializer = orjson.dumps
-custom_deserializer = orjson.loads
-
-logger = Logger(service="payment",
-                json_serializer=custom_serializer,
-                json_deserializer=custom_deserializer
-)
-
-# when using parameters, you can pass a partial
-# custom_serializer=functools.partial(orjson.dumps, option=orjson.OPT_SERIALIZE_NUMPY)
+```python hl_lines="1 3 7-8 13" title="Using Rust orjson library as serializer"
+--8<-- "examples/logger/src/bring_your_own_json_serializer.py"
```

## Testing your code

@@ -996,48 +560,13 @@ This is a Pytest sample that provides the minimum information necessary for Logg

    Note that dataclasses are available in Python 3.7+ only.
    ```python
-    from dataclasses import dataclass
-
-    import pytest
-
-    @pytest.fixture
-    def lambda_context():
-        @dataclass
-        class LambdaContext:
-            function_name: str = "test"
-            memory_limit_in_mb: int = 128
-            invoked_function_arn: str = "arn:aws:lambda:eu-west-1:809313241:function:test"
-            aws_request_id: str = "52fdfc07-2182-154f-163f-5f0f9a621d72"
-
-        return LambdaContext()
-
-    def test_lambda_handler(lambda_context):
-        test_event = {'test': 'event'}
-        your_lambda_handler(test_event, lambda_context) # this will now have a Context object populated
+    --8<-- "examples/logger/src/fake_lambda_context_for_logger.py"
    ```

-=== "fake_lambda_context_for_logger_py36.py"
-
-    ```python
-    from collections import namedtuple
-
-    import pytest
+=== "fake_lambda_context_for_logger_module.py"

-    @pytest.fixture
-    def lambda_context():
-        lambda_context = {
-            "function_name": "test",
-            "memory_limit_in_mb": 128,
-            "invoked_function_arn": "arn:aws:lambda:eu-west-1:809313241:function:test",
-            "aws_request_id": "52fdfc07-2182-154f-163f-5f0f9a621d72",
-        }
-
-        return namedtuple("LambdaContext", lambda_context.keys())(*lambda_context.values())
+    ```python
-    def test_lambda_handler(lambda_context):
-        test_event = {'test': 'event'}
-
-        # this will now have a Context object populated
-        your_lambda_handler(test_event, lambda_context)
+    --8<-- "examples/logger/src/fake_lambda_context_for_logger_module.py"
    ```

???+ tip

@@ -1056,97 +585,64 @@ POWERTOOLS_LOG_DEDUPLICATION_DISABLED="1" pytest -o log_cli=1

## FAQ

-**How can I enable boto3 and botocore library logging?**
+### How can I enable boto3 and botocore library logging?

You can enable the `botocore` and `boto3` logs by using the `set_stream_logger` method, which adds a stream handler for the given name and level to the logging module. By default, this logs all boto3 messages to stdout.

-```python hl_lines="6-7" title="Enabling AWS SDK logging"
-from typing import Dict, List
-from aws_lambda_powertools.utilities.typing import LambdaContext
-from aws_lambda_powertools import Logger
-
-import boto3
-boto3.set_stream_logger()
-boto3.set_stream_logger('botocore')
+```python hl_lines="8-9" title="Enabling AWS SDK logging"
+--8<-- "examples/logger/src/enabling_boto_logging.py"
+```

-logger = Logger()
-client = boto3.client('s3')
+### How can I enable Powertools logging for imported libraries?

+You can copy the Logger setup to all or sub-sets of registered external loggers. Use the `copy_config_to_registered_loggers` method to do this.

-def handler(event: Dict, context: LambdaContext) -> List:
-    response = client.list_buckets()
+By default, all registered loggers will be modified. You can change this behavior by providing `include` and `exclude` attributes. You can also provide an optional `log_level` attribute that external loggers will be configured with.

-    return response.get("Buckets", [])

-**How can I enable powertools logging for imported libraries?**

-You can copy the Logger setup to all or sub-sets of registered external loggers. Use the `copy_config_to_registered_logger` method to do this. By default all registered loggers will be modified. You can change this behaviour by providing `include` and `exclude` attributes. You can also provide optional `log_level` attribute external loggers will be configured with.
+The Python standard library log records contains a [large set of atttributes](https://docs.python.org/3/library/logging.html#logrecord-attributes){target="_blank"}, however only a few are included in Powertools Logger log record by default. +You can include any of these logging attributes as key value arguments (`kwargs`) when instantiating `Logger` or `LambdaPowertoolsFormatter`. -```python hl_lines="10" title="Cloning Logger config to all other registered standard loggers" -import logging +You can also add them later anywhere in your code with `append_keys`, or remove them with `remove_keys` methods. -from aws_lambda_powertools import Logger -from aws_lambda_powertools.logging import utils +=== "collect.py" -logger = Logger() + ```python hl_lines="3 8 10" + ---8<-- "examples/logger/src/append_and_remove_keys.py" + ``` +=== "Example CloudWatch Logs excerpt" -external_logger = logging.logger() + ```json hl_lines="6 15-16" + ---8<-- "examples/logger/src/append_and_remove_keys.json" + ``` -utils.copy_config_to_registered_loggers(source_logger=logger) -external_logger.info("test message") -``` +For log records originating from Powertools Logger, the `name` attribute will be the same as `service`, for log records coming from standard library logger, it will be the name of the logger (i.e. what was used as name argument to `logging.getLogger`). -**What's the difference between `append_keys` and `extra`?** +### What's the difference between `append_keys` and `extra`? Keys added with `append_keys` will persist across multiple log messages while keys added via `extra` will only be available in a given log message operation. Here's an example where we persist `payment_id` not `request_id`. Note that `payment_id` remains in both log messages while `booking_id` is only available in the first message. -=== "lambda_handler.py" - - ```python hl_lines="6 10" - from aws_lambda_powertools import Logger - - logger = Logger(service="payment") - - def handler(event, context): - logger.append_keys(payment_id="123456789") - - try: - booking_id = book_flight() - logger.info("Flight booked successfully", extra={ "booking_id": booking_id}) - except BookingReservationError: - ... +=== "collect.py" - logger.info("goodbye") + ```python hl_lines="16 23" + ---8<-- "examples/logger/src/append_keys_vs_extra.py" ``` + === "Example CloudWatch Logs excerpt" - ```json hl_lines="8-9 18" - { - "level": "INFO", - "location": ":10", - "message": "Flight booked successfully", - "timestamp": "2021-01-12 14:09:10,859", - "service": "payment", - "sampling_rate": 0.0, - "payment_id": "123456789", - "booking_id": "75edbad0-0857-4fc9-b547-6180e2f7959b" - }, - { - "level": "INFO", - "location": ":14", - "message": "goodbye", - "timestamp": "2021-01-12 14:09:10,860", - "service": "payment", - "sampling_rate": 0.0, - "payment_id": "123456789" - } + ```json hl_lines="9-10 19" + ---8<-- "examples/logger/src/append_keys_vs_extra_output.json" ``` -**How do I aggregate and search Powertools logs across accounts?** +### How do I aggregate and search Powertools logs across accounts? As of now, ElasticSearch (ELK) or 3rd party solutions are best suited to this task. 
Please refer to this [discussion for more details](https://github.com/awslabs/aws-lambda-powertools-python/issues/460)

diff --git a/docs/core/metrics.md b/docs/core/metrics.md
index 713ae874fe6..24a8f1e6fda 100644
--- a/docs/core/metrics.md
+++ b/docs/core/metrics.md
@@ -38,34 +38,14 @@ Metric has two global settings that will be used across all metrics emitted:

???+ tip
    Use your application or main service as the metric namespace to easily group all metrics.

-???+ example
-    **AWS Serverless Application Model (SAM)**
-
-=== "template.yml"
-
-    ```yaml hl_lines="9 10"
-    Resources:
-      HelloWorldFunction:
-        Type: AWS::Serverless::Function
-        Properties:
-          Runtime: python3.8
-          Environment:
-            Variables:
-              POWERTOOLS_SERVICE_NAME: payment
-              POWERTOOLS_METRICS_NAMESPACE: ServerlessAirline
-    ```
-
-=== "app.py"
-
-    ```python hl_lines="4 6"
-    from aws_lambda_powertools import Metrics
-    from aws_lambda_powertools.metrics import MetricUnit
+```yaml hl_lines="13" title="AWS Serverless Application Model (SAM) example"
+--8<-- "examples/metrics/sam/template.yaml"
```

-    metrics = Metrics() # Sets metric namespace and service via env var
-    # OR
-    metrics = Metrics(namespace="ServerlessAirline", service="orders") # Sets metric namespace, and service as a metric dimension
-    ```
+???+ note
+    For brevity, all code snippets on this page will rely on the environment variables above being set.
+    This ensures we instantiate `metrics = Metrics()` rather than `metrics = Metrics(service="booking", namespace="ServerlessAirline")`, etc.

### Creating metrics

@@ -76,70 +56,57 @@ You can create metrics using `add_metric`, and you can create dimensions for all

=== "Metrics"

-    ```python hl_lines="8"
-    from aws_lambda_powertools import Metrics
-    from aws_lambda_powertools.metrics import MetricUnit
-
-    metrics = Metrics(namespace="ExampleApplication", service="booking")
-
-    @metrics.log_metrics
-    def lambda_handler(evt, ctx):
-        metrics.add_metric(name="SuccessfulBooking", unit=MetricUnit.Count, value=1)
+    ```python hl_lines="10"
+    --8<-- "examples/metrics/src/add_metrics.py"
    ```

-=== "Metrics with custom dimensions"

-    ```python hl_lines="8-9"
-    from aws_lambda_powertools import Metrics
-    from aws_lambda_powertools.metrics import MetricUnit
-
-    metrics = Metrics(namespace="ExampleApplication", service="booking")
+=== "Metrics with custom dimensions"

-    @metrics.log_metrics
-    def lambda_handler(evt, ctx):
-        metrics.add_dimension(name="environment", value="prod")
-        metrics.add_metric(name="SuccessfulBooking", unit=MetricUnit.Count, value=1)
+    ```python hl_lines="13"
+    --8<-- "examples/metrics/src/add_dimension.py"
    ```

???+ tip "Tip: Autocomplete Metric Units"
    The `MetricUnit` enum facilitates finding a supported metric unit by CloudWatch. Alternatively, you can pass the value as a string if you already know it _e.g. `unit="Count"`_.

???+ note "Note: Metrics overflow"
    CloudWatch EMF supports a max of 100 metrics per batch. Metrics utility will flush all metrics when adding the 100th metric. Subsequent metrics (101st+) will be aggregated into a new EMF object, for your convenience.
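Tying the tabbed examples above together, a minimal sketch of a handler emitting a metric with a custom dimension might look like this (the metric and dimension names mirror those examples and are illustrative):

```python
from aws_lambda_powertools import Metrics
from aws_lambda_powertools.metrics import MetricUnit

metrics = Metrics()  # namespace and service resolved via the env vars shown above

@metrics.log_metrics  # serializes and flushes metrics as EMF on handler return
def lambda_handler(event, context):
    # dimensions apply to all aggregate metrics in this EMF blob
    metrics.add_dimension(name="environment", value="prod")
    metrics.add_metric(name="SuccessfulBooking", unit=MetricUnit.Count, value=1)
```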
???+ warning "Warning: Do not create metrics or dimensions outside the handler"
    Metrics or dimensions added in the global scope will only be added during cold start. Disregard if that's the intended behavior.

+### Adding multi-value metrics
+
+You can call `add_metric()` with the same metric name multiple times. The values will be grouped together in a list.
+
+=== "Metrics"
+
+    ```python hl_lines="14-15"
+    --8<-- "examples/metrics/src/add_multi_value_metrics.py"
+    ```
+
+=== "Example CloudWatch Logs excerpt"
+
+    ```json hl_lines="15 24-26"
+    --8<-- "examples/metrics/src/add_multi_value_metrics_output.json"
+    ```

### Adding default dimensions

-You can use either `set_default_dimensions` method or `default_permissions` parameter in `log_metrics` decorator to persist dimensions across Lambda invocations.
+You can use `set_default_dimensions` method, or `default_dimensions` parameter in `log_metrics` decorator, to persist dimensions across Lambda invocations.

If you'd like to remove them at some point, you can use `clear_default_dimensions` method.

=== "set_default_dimensions method"

-    ```python hl_lines="5"
-    from aws_lambda_powertools import Metrics
-    from aws_lambda_powertools.metrics import MetricUnit
-
-    metrics = Metrics(namespace="ExampleApplication", service="booking")
-    metrics.set_default_dimensions(environment="prod", another="one")
-
-    @metrics.log_metrics
-    def lambda_handler(evt, ctx):
-        metrics.add_metric(name="SuccessfulBooking", unit=MetricUnit.Count, value=1)
+    ```python hl_lines="9"
+    --8<-- "examples/metrics/src/set_default_dimensions.py"
    ```

-=== "with log_metrics decorator"

-    ```python hl_lines="5 7"
-    from aws_lambda_powertools import Metrics
-    from aws_lambda_powertools.metrics import MetricUnit
-
-    metrics = Metrics(namespace="ExampleApplication", service="booking")
-    DEFAULT_DIMENSIONS = {"environment": "prod", "another": "one"}
+=== "with log_metrics decorator"

-    @metrics.log_metrics(default_dimensions=DEFAULT_DIMENSIONS)
-    def lambda_handler(evt, ctx):
-        metrics.add_metric(name="SuccessfulBooking", unit=MetricUnit.Count, value=1)
+    ```python hl_lines="9 13"
+    --8<-- "examples/metrics/src/set_default_dimensions_log_metrics.py"
    ```

### Flushing metrics

@@ -150,42 +117,14 @@ This decorator also **validates**, **serializes**, and **flushes** all your metr

=== "app.py"

-    ```python hl_lines="6"
-    from aws_lambda_powertools import Metrics
-    from aws_lambda_powertools.metrics import MetricUnit
-
-    metrics = Metrics(namespace="ExampleApplication", service="ExampleService")
-
-    @metrics.log_metrics
-    def lambda_handler(evt, ctx):
-        metrics.add_metric(name="BookingConfirmation", unit=MetricUnit.Count, value=1)
+    ```python hl_lines="8"
+    --8<-- "examples/metrics/src/add_metrics.py"
    ```
+
=== "Example CloudWatch Logs excerpt"

-    ```json hl_lines="2 7 10 15 22"
-    {
-        "BookingConfirmation": 1.0,
-        "_aws": {
-            "Timestamp": 1592234975665,
-            "CloudWatchMetrics": [
-                {
-                    "Namespace": "ExampleApplication",
-                    "Dimensions": [
-                        [
-                            "service"
-                        ]
-                    ],
-                    "Metrics": [
-                        {
-                            "Name": "BookingConfirmation",
-                            "Unit": "Count"
-                        }
-                    ]
-                }
-            ]
-        },
-        "service": "ExampleService"
-    }
+    ```json hl_lines="6 9 14 21-23"
+    --8<-- "examples/metrics/src/log_metrics_output.json"
    ```

???+ tip "Tip: Metric validation"

@@ -199,49 +138,28 @@ If you want to ensure at least one metric is
always emitted, you can pass `raise_on_empty_metrics` to the **log_metrics** decorator: -```python hl_lines="5" title="Raising SchemaValidationError exception if no metrics are added" -from aws_lambda_powertools.metrics import Metrics - -metrics = Metrics() - -@metrics.log_metrics(raise_on_empty_metrics=True) -def lambda_handler(evt, ctx): - ... +```python hl_lines="7" title="Raising SchemaValidationError exception if no metrics are added" +--8<-- "examples/metrics/src/raise_on_empty_metrics.py" ``` ???+ tip "Suppressing warning messages on empty metrics" If you expect your function to execute without publishing metrics every time, you can suppress the warning with **`warnings.filterwarnings("ignore", "No metrics to publish*")`**. -#### Nesting multiple middlewares - -When using multiple middlewares, use `log_metrics` as your **last decorator** wrapping all subsequent ones to prevent early Metric validations when code hasn't been run yet. - -```python hl_lines="7-8" title="Example with multiple decorators" -from aws_lambda_powertools import Metrics, Tracer -from aws_lambda_powertools.metrics import MetricUnit - -tracer = Tracer(service="booking") -metrics = Metrics(namespace="ExampleApplication", service="booking") - -@metrics.log_metrics -@tracer.capture_lambda_handler -def lambda_handler(evt, ctx): - metrics.add_metric(name="BookingConfirmation", unit=MetricUnit.Count, value=1) -``` - ### Capturing cold start metric You can optionally capture cold start metrics with `log_metrics` decorator via `capture_cold_start_metric` param. -```python hl_lines="5" title="Generating function cold start metric" -from aws_lambda_powertools import Metrics +=== "app.py" + + ```python hl_lines="7" + --8<-- "examples/metrics/src/capture_cold_start_metric.py" + ``` -metrics = Metrics(service="ExampleService") +=== "Example CloudWatch Logs excerpt" -@metrics.log_metrics(capture_cold_start_metric=True) -def lambda_handler(evt, ctx): - ... -``` + ```json hl_lines="9 15 22 24-25" + --8<-- "examples/metrics/src/capture_cold_start_metric_output.json" + ``` If it's a cold start invocation, this feature will: @@ -264,45 +182,14 @@ You can add high-cardinality data as part of your Metrics log with `add_metadata === "app.py" - ```python hl_lines="9" - from aws_lambda_powertools import Metrics - from aws_lambda_powertools.metrics import MetricUnit - - metrics = Metrics(namespace="ExampleApplication", service="booking") - - @metrics.log_metrics - def lambda_handler(evt, ctx): - metrics.add_metric(name="SuccessfulBooking", unit=MetricUnit.Count, value=1) - metrics.add_metadata(key="booking_id", value="booking_uuid") + ```python hl_lines="14" + --8<-- "examples/metrics/src/add_metadata.py" ``` === "Example CloudWatch Logs excerpt" - ```json hl_lines="23" - { - "SuccessfulBooking": 1.0, - "_aws": { - "Timestamp": 1592234975665, - "CloudWatchMetrics": [ - { - "Namespace": "ExampleApplication", - "Dimensions": [ - [ - "service" - ] - ], - "Metrics": [ - { - "Name": "SuccessfulBooking", - "Unit": "Count" - } - ] - } - ] - }, - "service": "booking", - "booking_id": "booking_uuid" - } + ```json hl_lines="22" + --8<-- "examples/metrics/src/add_metadata_output.json" ``` ### Single metric with a different dimension @@ -314,16 +201,17 @@ CloudWatch EMF uses the same dimensions across all your metrics. 
Use `single_met

**unique metric = (metric_name + dimension_name + dimension_value)**

-```python hl_lines="6-7" title="Generating an EMF blob with a single metric"
-from aws_lambda_powertools import single_metric
-from aws_lambda_powertools.metrics import MetricUnit
+=== "app.py"

+    ```python hl_lines="11"
+    --8<-- "examples/metrics/src/single_metric.py"
+    ```

-def lambda_handler(evt, ctx):
-    with single_metric(name="ColdStart", unit=MetricUnit.Count, value=1, namespace="ExampleApplication") as metric:
-        metric.add_dimension(name="function_context", value="$LATEST")
-        ...
-```
+=== "Example CloudWatch Logs excerpt"
+
+    ```json hl_lines="15"
+    --8<-- "examples/metrics/src/single_metric_output.json"
+    ```

### Flushing metrics manually

@@ -332,18 +220,8 @@ If you prefer not to use `log_metrics` because you might want to encapsulate add

???+ warning
    Metrics, dimensions and namespace validation still applies

-```python hl_lines="9-11" title="Manually flushing and clearing metrics from memory"
-import json
-from aws_lambda_powertools import Metrics
-from aws_lambda_powertools.metrics import MetricUnit
-
-metrics = Metrics(namespace="ExampleApplication", service="booking")
-
-def lambda_handler(evt, ctx):
-    metrics.add_metric(name="ColdStart", unit=MetricUnit.Count, value=1)
-    your_metrics_object = metrics.serialize_metric_set()
-    metrics.clear_metrics()
-    print(json.dumps(your_metrics_object))
+```python hl_lines="11-14" title="Manually flushing and clearing metrics from memory"
+--8<-- "examples/metrics/src/single_metric.py"
```

## Testing your code

@@ -351,99 +229,55 @@ def lambda_handler(evt, ctx):

### Environment variables

???+ tip
-    Ignore this section, if you are explicitly setting namespace/default dimension via `namespace` and `service` parameters.
+    Ignore this section if:

-    For example, `Metrics(namespace=ApplicationName, service=ServiceName)`
+    * You are explicitly setting namespace/default dimension via `namespace` and `service` parameters
+    * You're not instantiating `Metrics` in the global namespace

-Use `POWERTOOLS_METRICS_NAMESPACE` and `POWERTOOLS_SERVICE_NAME` env vars when unit testing your code to ensure metric namespace and dimension objects are created, and your code doesn't fail validation.
+    For example, `Metrics(namespace="ServerlessAirline", service="booking")`
+
+Make sure to set `POWERTOOLS_METRICS_NAMESPACE` and `POWERTOOLS_SERVICE_NAME` before running your tests to prevent failing on a `SchemaValidationError` exception. You can set them before you run tests or via pytest plugins like [dotenv](https://pypi.org/project/pytest-dotenv/).

```bash title="Injecting dummy Metric Namespace before running tests"
-POWERTOOLS_SERVICE_NAME="Example" POWERTOOLS_METRICS_NAMESPACE="Application" python -m pytest
+--8<-- "examples/metrics/src/run_tests_env_var.sh"
```

### Clearing metrics

-`Metrics` keep metrics in memory across multiple instances. If you need to test this behaviour, you can use the following Pytest fixture to ensure metrics are reset incl. cold start:
+`Metrics` keeps metrics in memory across multiple instances. If you need to test this behavior, you can use the following Pytest fixture to ensure metrics are reset, including
cold start: ```python title="Clearing metrics between tests" -@pytest.fixture(scope="function", autouse=True) -def reset_metric_set(): - # Clear out every metric data prior to every test - metrics = Metrics() - metrics.clear_metrics() - metrics_global.is_cold_start = True # ensure each test has cold start - metrics.clear_default_dimensions() # remove persisted default dimensions, if any - yield +--8<-- "examples/metrics/src/clear_metrics_in_tests.py" ``` ### Functional testing -As metrics are logged to standard output, you can read standard output and assert whether metrics are present. Here's an example using `pytest` with `capsys` built-in fixture: +You can read standard output and assert whether metrics have been flushed. Here's an example using `pytest` with `capsys` built-in fixture: -=== "Assert single EMF blob with pytest.py" +=== "Asserting single EMF blob" ```python hl_lines="6 9-10 23-34" - from aws_lambda_powertools import Metrics - from aws_lambda_powertools.metrics import MetricUnit - - import json - - def test_log_metrics(capsys): - # GIVEN Metrics is initialized - metrics = Metrics(namespace="ServerlessAirline") - - # WHEN we utilize log_metrics to serialize - # and flush all metrics at the end of a function execution - @metrics.log_metrics - def lambda_handler(evt, ctx): - metrics.add_metric(name="SuccessfulBooking", unit=MetricUnit.Count, value=1) - metrics.add_dimension(name="environment", value="prod") - - lambda_handler({}, {}) - log = capsys.readouterr().out.strip() # remove any extra line - metrics_output = json.loads(log) # deserialize JSON str - - # THEN we should have no exceptions - # and a valid EMF object should be flushed correctly - assert "SuccessfulBooking" in log # basic string assertion in JSON str - assert "SuccessfulBooking" in metrics_output["_aws"]["CloudWatchMetrics"][0]["Metrics"][0]["Name"] + --8<-- "examples/metrics/src/assert_single_emf_blob.py" ``` -=== "Assert multiple EMF blobs with pytest" - - ```python hl_lines="8-9 11 21-23 25 29-30 32" - from aws_lambda_powertools import Metrics - from aws_lambda_powertools.metrics import MetricUnit - - from collections import namedtuple - - import json +=== "add_metrics.py" - def capture_metrics_output_multiple_emf_objects(capsys): - return [json.loads(line.strip()) for line in capsys.readouterr().out.split("\n") if line] - - def test_log_metrics(capsys): - # GIVEN Metrics is initialized - metrics = Metrics(namespace="ServerlessAirline") + ```python + --8<-- "examples/metrics/src/add_metrics.py" + ``` - # WHEN log_metrics is used with capture_cold_start_metric - @metrics.log_metrics(capture_cold_start_metric=True) - def lambda_handler(evt, ctx): - metrics.add_metric(name="SuccessfulBooking", unit=MetricUnit.Count, value=1) - metrics.add_dimension(name="environment", value="prod") +=== "Asserting multiple EMF blobs" - # log_metrics uses function_name property from context to add as a dimension for cold start metric - LambdaContext = namedtuple("LambdaContext", "function_name") - lambda_handler({}, LambdaContext("example_fn") + This will be needed when using `capture_cold_start_metric=True`, or when both `Metrics` and `single_metric` are used. 
- cold_start_blob, custom_metrics_blob = capture_metrics_output_multiple_emf_objects(capsys) + ```python hl_lines="20-21 27" + --8<-- "examples/metrics/src/assert_multiple_emf_blobs.py" + ``` - # THEN ColdStart metric and function_name dimension should be logged - # in a separate EMF blob than the application metrics - assert cold_start_blob["ColdStart"] == [1.0] - assert cold_start_blob["function_name"] == "example_fn" +=== "my_other_module.py" - assert "SuccessfulBooking" in custom_metrics_blob # as per previous example + ```python + --8<-- "examples/metrics/src/assert_multiple_emf_blobs_module.py" ``` ???+ tip diff --git a/docs/core/tracer.md b/docs/core/tracer.md index 363611bbbc0..c8037eff241 100644 --- a/docs/core/tracer.md +++ b/docs/core/tracer.md @@ -20,33 +20,16 @@ Tracer is an opinionated thin wrapper for [AWS X-Ray Python SDK](https://github. Before your use this utility, your AWS Lambda function [must have permissions](https://docs.aws.amazon.com/lambda/latest/dg/services-xray.html#services-xray-permissions) to send traces to AWS X-Ray. -```yaml hl_lines="6 9" title="AWS Serverless Application Model (SAM) example" -Resources: - HelloWorldFunction: - Type: AWS::Serverless::Function - Properties: - Runtime: python3.8 - Tracing: Active - Environment: - Variables: - POWERTOOLS_SERVICE_NAME: example +```yaml hl_lines="9 12" title="AWS Serverless Application Model (SAM) example" +--8<-- "examples/tracer/sam/template.yaml" ``` ### Lambda handler You can quickly start by initializing `Tracer` and use `capture_lambda_handler` decorator for your Lambda handler. -```python hl_lines="1 3 6" title="Tracing Lambda handler with capture_lambda_handler" -from aws_lambda_powertools import Tracer - -tracer = Tracer() # Sets service via env var -# OR tracer = Tracer(service="example") - -@tracer.capture_lambda_handler -def handler(event, context): - charge_id = event.get('charge_id') - payment = collect_payment(charge_id) - ... +```python hl_lines="1 4 12" title="Tracing Lambda handler with capture_lambda_handler" +--8<-- "examples/tracer/src/capture_lambda_handler.py" ``` `capture_lambda_handler` performs these additional tasks to ease operations: @@ -59,39 +42,22 @@ def handler(event, context): **Annotations** are key-values associated with traces and indexed by AWS X-Ray. You can use them to filter traces and to create [Trace Groups](https://aws.amazon.com/about-aws/whats-new/2018/11/aws-xray-adds-the-ability-to-group-traces/) to slice and dice your transactions. -```python hl_lines="7" title="Adding annotations with put_annotation method" -from aws_lambda_powertools import Tracer -tracer = Tracer() - -@tracer.capture_lambda_handler -def handler(event, context): - ... - tracer.put_annotation(key="PaymentStatus", value="SUCCESS") +```python hl_lines="8" title="Adding annotations with put_annotation method" +--8<-- "examples/tracer/src/put_trace_annotations.py" ``` **Metadata** are key-values also associated with traces but not indexed by AWS X-Ray. You can use them to add additional context for an operation using any native object. -```python hl_lines="8" title="Adding arbitrary metadata with put_metadata method" -from aws_lambda_powertools import Tracer -tracer = Tracer() - -@tracer.capture_lambda_handler -def handler(event, context): - ... 
- ret = some_logic() - tracer.put_metadata(key="payment_response", value=ret) +```python hl_lines="19" title="Adding arbitrary metadata with put_metadata method" +--8<-- "examples/tracer/src/put_trace_metadata.py" ``` ### Synchronous functions You can trace synchronous functions using the `capture_method` decorator. -```python hl_lines="7 13" title="Tracing an arbitrary function with capture_method" -@tracer.capture_method -def collect_payment(charge_id): - ret = requests.post(PAYMENT_ENDPOINT) # logic - tracer.put_annotation("PAYMENT_STATUS", "SUCCESS") # custom annotation - return ret +```python hl_lines="7" title="Tracing an arbitrary function with capture_method" +--8<-- "examples/tracer/src/capture_method.py" ``` ???+ note "Note: Function responses are auto-captured and stored as JSON, by default." @@ -101,7 +67,6 @@ def collect_payment(charge_id): The serialization is performed by aws-xray-sdk via `jsonpickle` module. This can cause side effects for file-like objects like boto S3 `StreamingBody`, where its response will be read only once during serialization. - ### Asynchronous and generator functions ???+ warning @@ -111,47 +76,20 @@ You can trace asynchronous functions and generator functions (including context === "Async" - ```python hl_lines="7" - import asyncio - import contextlib - from aws_lambda_powertools import Tracer - - tracer = Tracer() - - @tracer.capture_method - async def collect_payment(): - ... + ```python hl_lines="9" + --8<-- "examples/tracer/src/capture_method_async.py" ``` === "Context manager" - ```python hl_lines="7-8" - import asyncio - import contextlib - from aws_lambda_powertools import Tracer - - tracer = Tracer() - - @contextlib.contextmanager - @tracer.capture_method - def collect_payment_ctxman(): - yield result - ... + ```python hl_lines="12-13" + --8<-- "examples/tracer/src/capture_method_context_manager.py" ``` === "Generators" ```python hl_lines="9" - import asyncio - import contextlib - from aws_lambda_powertools import Tracer - - tracer = Tracer() - - @tracer.capture_method - def collect_payment_gen(): - yield result - ... 
+ --8<-- "examples/tracer/src/capture_method_generators.py" ``` ## Advanced @@ -162,14 +100,8 @@ Tracer automatically patches all [supported libraries by X-Ray](https://docs.aws If you're looking to shave a few microseconds, or milliseconds depending on your function memory configuration, you can patch specific modules using `patch_modules` param: -```python hl_lines="7" title="Example of explicitly patching boto3 and requests only" -import boto3 -import requests - -from aws_lambda_powertools import Tracer - -modules_to_be_patched = ["boto3", "requests"] -tracer = Tracer(patch_modules=modules_to_be_patched) +```python hl_lines="8" title="Example of explicitly patching requests only" +--8<-- "examples/tracer/src/patch_modules.py" ``` ### Disabling response auto-capture @@ -183,27 +115,14 @@ Use **`capture_response=False`** parameter in both `capture_lambda_handler` and === "sensitive_data_scenario.py" - ```python hl_lines="3 7" - from aws_lambda_powertools import Tracer - - @tracer.capture_method(capture_response=False) - def fetch_sensitive_information(): - return "sensitive_information" - - @tracer.capture_lambda_handler(capture_response=False) - def handler(event, context): - sensitive_information = fetch_sensitive_information() + ```python hl_lines="8 15" + --8<-- "examples/tracer/src/disable_capture_response.py" ``` -=== "streaming_object_scenario.py" - ```python hl_lines="3" - from aws_lambda_powertools import Tracer +=== "streaming_object_scenario.py" - @tracer.capture_method(capture_response=False) - def get_s3_object(bucket_name, object_key): - s3 = boto3.client("s3") - s3_object = get_object(Bucket=bucket_name, Key=object_key) - return s3_object + ```python hl_lines="19" + --8<-- "examples/tracer/src/disable_capture_response_streaming_body.py" ``` ### Disabling exception auto-capture @@ -213,12 +132,8 @@ Use **`capture_error=False`** parameter in both `capture_lambda_handler` and `ca ???+ info Useful when returning sensitive information in exceptions/stack traces you don't control -```python hl_lines="3 5" title="Disabling exception auto-capture for tracing metadata" -from aws_lambda_powertools import Tracer - -@tracer.capture_lambda_handler(capture_error=False) -def handler(event, context): - raise ValueError("some sensitive info in the stack trace...") +```python hl_lines="16 26" title="Disabling exception auto-capture for tracing metadata" +--8<-- "examples/tracer/src/disable_capture_error.py" ``` ### Ignoring certain HTTP endpoints @@ -227,47 +142,19 @@ You might have endpoints you don't want requests to be traced, perhaps due to th You can use `ignore_endpoint` method with the hostname and/or URLs you'd like it to be ignored - globs (`*`) are allowed. 
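This change swaps the inline example below for a snippet include, so the new file's contents aren't visible in the diff. For reference, here is a minimal sketch of what `examples/tracer/src/ignore_endpoints.py` presumably contains, reconstructed from the inline code being removed (the `ec2_api_calls` helper and handler body are illustrative placeholders):

```python
from aws_lambda_powertools import Tracer

tracer = Tracer()
# ignore all calls to `ec2.amazon.com`
tracer.ignore_endpoint(hostname="ec2.amazon.com")
# ignore calls to `*.sensitive.com/password` and `*.sensitive.com/credit-card`
tracer.ignore_endpoint(hostname="*.sensitive.com", urls=["/password", "/credit-card"])


def ec2_api_calls():
    # stand-in for an HTTP call whose trace should be suppressed
    return "suppress_api_responses"


@tracer.capture_lambda_handler
def handler(event, context):
    return ec2_api_calls()
```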
-```python title="Ignoring certain HTTP endpoints from being traced" -from aws_lambda_powertools import Tracer - -tracer = Tracer() -# ignore all calls to `ec2.amazon.com` -tracer.ignore_endpoint(hostname="ec2.amazon.com") -# ignore calls to `*.sensitive.com/password` and `*.sensitive.com/credit-card` -tracer.ignore_endpoint(hostname="*.sensitive.com", urls=["/password", "/credit-card"]) - - -def ec2_api_calls(): - return "suppress_api_responses" - -@tracer.capture_lambda_handler -def handler(event, context): - for x in long_list: - ec2_api_calls() +```python hl_lines="12-13" title="Ignoring certain HTTP endpoints from being traced" +--8<-- "examples/tracer/src/ignore_endpoints.py" ``` - ### Tracing aiohttp requests ???+ info This snippet assumes you have aiohttp as a dependency -You can use `aiohttp_trace_config` function to create a valid [aiohttp trace_config object](https://docs.aiohttp.org/en/stable/tracing_reference.html). This is necessary since X-Ray utilizes aiohttp trace hooks to capture requests end-to-end. +You can use `aiohttp_trace_config` function to create a valid [aiohttp trace_config object](https://docs.aiohttp.org/en/stable/tracing_reference.html){target="_blank"}. This is necessary since X-Ray utilizes [aiohttp](https://docs.aiohttp.org/en/stable/){target="_blank"} trace hooks to capture requests end-to-end. -```python hl_lines="5 10" title="Tracing aiohttp requests" -import asyncio -import aiohttp - -from aws_lambda_powertools import Tracer -from aws_lambda_powertools.tracing import aiohttp_trace_config - -tracer = Tracer() - -async def aiohttp_task(): - async with aiohttp.ClientSession(trace_configs=[aiohttp_trace_config()]) as session: - async with session.get("https://httpbin.org/json") as resp: - resp = await resp.json() - return resp +```python hl_lines="7 17" title="Tracing aiohttp requests" +--8<-- "examples/tracer/src/tracing_aiohttp.py" ``` ### Escape hatch mechanism @@ -276,16 +163,8 @@ You can use `tracer.provider` attribute to access all methods provided by AWS X- This is useful when you need a feature available in X-Ray that is not available in the Tracer utility, for example [thread-safe](https://github.com/aws/aws-xray-sdk-python/#user-content-trace-threadpoolexecutor), or [context managers](https://github.com/aws/aws-xray-sdk-python/#user-content-start-a-custom-segmentsubsegment). -```python hl_lines="7" title="Tracing a code block with in_subsegment escape hatch" -from aws_lambda_powertools import Tracer - -tracer = Tracer() - -@tracer.capture_lambda_handler -def handler(event, context): - with tracer.provider.in_subsegment('## custom subsegment') as subsegment: - ret = some_work() - subsegment.put_metadata('response', ret) +```python hl_lines="14" title="Tracing a code block with in_subsegment escape hatch" +--8<-- "examples/tracer/src/sdk_escape_hatch.py" ``` ### Concurrent asynchronous functions @@ -295,25 +174,8 @@ def handler(event, context): A safe workaround mechanism is to use `in_subsegment_async` available via Tracer escape hatch (`tracer.provider`). -```python hl_lines="6 7 12 15 17" title="Workaround to safely trace async concurrent functions" -import asyncio - -from aws_lambda_powertools import Tracer -tracer = Tracer() - -async def another_async_task(): - async with tracer.provider.in_subsegment_async("## another_async_task") as subsegment: - subsegment.put_annotation(key="key", value="value") - subsegment.put_metadata(key="key", value="value", namespace="namespace") - ... - -async def another_async_task_2(): - ... 
- -@tracer.capture_method -async def collect_payment(charge_id): - asyncio.gather(another_async_task(), another_async_task_2()) - ... +```python hl_lines="10 17 24" title="Workaround to safely trace async concurrent functions" +--8<-- "examples/tracer/src/capture_method_async_concurrency.py" ``` ### Reusing Tracer across your code @@ -329,28 +191,15 @@ Tracer keeps a copy of its configuration after the first initialization. This is === "handler.py" - ```python hl_lines="2 4 9" - from aws_lambda_powertools import Tracer - from payment import collect_payment - - tracer = Tracer(service="payment") - - @tracer.capture_lambda_handler - def handler(event, context): - charge_id = event.get('charge_id') - payment = collect_payment(charge_id) + ```python hl_lines="1 6" + --8<-- "examples/tracer/src/tracer_reuse.py" ``` -=== "payment.py" - A new instance of Tracer will be created but will reuse the previous Tracer instance configuration, similar to a Singleton. - - ```python hl_lines="3 5" - from aws_lambda_powertools import Tracer - tracer = Tracer(service="payment") +=== "tracer_reuse_payment.py" + A new instance of Tracer will be created but will reuse the previous Tracer instance configuration, similar to a Singleton. - @tracer.capture_method - def collect_payment(charge_id: str): - ... + ```python hl_lines="3" + --8<-- "examples/tracer/src/tracer_reuse_payment.py" ``` ## Testing your code diff --git a/docs/index.md b/docs/index.md index 83d841de153..cc3d437334e 100644 --- a/docs/index.md +++ b/docs/index.md @@ -3,18 +3,19 @@ title: Homepage description: AWS Lambda Powertools Python --- + + A suite of utilities for AWS Lambda functions to ease adopting best practices such as tracing, structured logging, custom metrics, idempotency, batching, and more. ???+ tip "Tip: Looking for a quick read through how the core features are used?" Check out [this detailed blog post](https://aws.amazon.com/blogs/opensource/simplifying-serverless-best-practices-with-lambda-powertools/) with a practical example. - ## Install Powertools is available in the following formats: -* **Lambda Layer**: [**arn:aws:lambda:{region}:017000801446:layer:AWSLambdaPowertoolsPython:20**](#){: .copyMe}:clipboard: +* **Lambda Layer**: [**arn:aws:lambda:{region}:017000801446:layer:AWSLambdaPowertoolsPython:21**](#){: .copyMe}:clipboard: * **PyPi**: **`pip install aws-lambda-powertools`** ???+ hint "Support this project by using Lambda Layers :heart:" @@ -22,7 +23,6 @@ Powertools is available in the following formats: When using Layers, you can add Lambda Powertools as a dev dependency (or as part of your virtual env) to not impact the development process. - ### Lambda Layer [Lambda Layer](https://docs.aws.amazon.com/lambda/latest/dg/configuration-layers.html){target="_blank"} is a .zip file archive that can contain additional code, pre-packaged dependencies, data, or configuration files. Layers promote code sharing and separation of responsibilities so that you can iterate faster on writing business logic. @@ -31,25 +31,25 @@ You can include Lambda Powertools Lambda Layer using [AWS Lambda Console](https: ??? 
note "Note: Expand to copy any regional Lambda Layer ARN" - | Region | Layer ARN - |--------------------------- | --------------------------- - | `us-east-1` | [arn:aws:lambda:us-east-1:017000801446:layer:AWSLambdaPowertoolsPython:20](#){: .copyMe}:clipboard: - | `us-east-2` | [arn:aws:lambda:us-east-2:017000801446:layer:AWSLambdaPowertoolsPython:20](#){: .copyMe}:clipboard: - | `us-west-1` | [arn:aws:lambda:us-west-1:017000801446:layer:AWSLambdaPowertoolsPython:20](#){: .copyMe}:clipboard: - | `us-west-2` | [arn:aws:lambda:us-west-2:017000801446:layer:AWSLambdaPowertoolsPython:20](#){: .copyMe}:clipboard: - | `ap-south-1` | [arn:aws:lambda:ap-south-1:017000801446:layer:AWSLambdaPowertoolsPython:20](#){: .copyMe}:clipboard: - | `ap-northeast-1` | [arn:aws:lambda:ap-northeast-1:017000801446:layer:AWSLambdaPowertoolsPython:20](#){: .copyMe}:clipboard: - | `ap-northeast-2` | [arn:aws:lambda:ap-northeast-2:017000801446:layer:AWSLambdaPowertoolsPython:20](#){: .copyMe}:clipboard: - | `ap-northeast-3` | [arn:aws:lambda:ap-northeast-3:017000801446:layer:AWSLambdaPowertoolsPython:20](#){: .copyMe}:clipboard: - | `ap-southeast-1` | [arn:aws:lambda:ap-southeast-1:017000801446:layer:AWSLambdaPowertoolsPython:20](#){: .copyMe}:clipboard: - | `ap-southeast-2` | [arn:aws:lambda:ap-southeast-2:017000801446:layer:AWSLambdaPowertoolsPython:20](#){: .copyMe}:clipboard: - | `eu-central-1` | [arn:aws:lambda:eu-central-1:017000801446:layer:AWSLambdaPowertoolsPython:20](#){: .copyMe}:clipboard: - | `eu-west-1` | [arn:aws:lambda:eu-west-1:017000801446:layer:AWSLambdaPowertoolsPython:20](#){: .copyMe}:clipboard: - | `eu-west-2` | [arn:aws:lambda:eu-west-2:017000801446:layer:AWSLambdaPowertoolsPython:20](#){: .copyMe}:clipboard: - | `eu-west-3` | [arn:aws:lambda:eu-west-3:017000801446:layer:AWSLambdaPowertoolsPython:20](#){: .copyMe}:clipboard: - | `eu-north-1` | [arn:aws:lambda:eu-north-1:017000801446:layer:AWSLambdaPowertoolsPython:20](#){: .copyMe}:clipboard: - | `ca-central-1` | [arn:aws:lambda:ca-central-1:017000801446:layer:AWSLambdaPowertoolsPython:20](#){: .copyMe}:clipboard: - | `sa-east-1` | [arn:aws:lambda:sa-east-1:017000801446:layer:AWSLambdaPowertoolsPython:20](#){: .copyMe}:clipboard: + | Region | Layer ARN | + | ---------------- | -------------------------------------------------------------------------------------------------------- | + | `us-east-1` | [arn:aws:lambda:us-east-1:017000801446:layer:AWSLambdaPowertoolsPython:21](#){: .copyMe}:clipboard: | + | `us-east-2` | [arn:aws:lambda:us-east-2:017000801446:layer:AWSLambdaPowertoolsPython:21](#){: .copyMe}:clipboard: | + | `us-west-1` | [arn:aws:lambda:us-west-1:017000801446:layer:AWSLambdaPowertoolsPython:21](#){: .copyMe}:clipboard: | + | `us-west-2` | [arn:aws:lambda:us-west-2:017000801446:layer:AWSLambdaPowertoolsPython:21](#){: .copyMe}:clipboard: | + | `ap-south-1` | [arn:aws:lambda:ap-south-1:017000801446:layer:AWSLambdaPowertoolsPython:21](#){: .copyMe}:clipboard: | + | `ap-northeast-1` | [arn:aws:lambda:ap-northeast-1:017000801446:layer:AWSLambdaPowertoolsPython:21](#){: .copyMe}:clipboard: | + | `ap-northeast-2` | [arn:aws:lambda:ap-northeast-2:017000801446:layer:AWSLambdaPowertoolsPython:21](#){: .copyMe}:clipboard: | + | `ap-northeast-3` | [arn:aws:lambda:ap-northeast-3:017000801446:layer:AWSLambdaPowertoolsPython:21](#){: .copyMe}:clipboard: | + | `ap-southeast-1` | [arn:aws:lambda:ap-southeast-1:017000801446:layer:AWSLambdaPowertoolsPython:21](#){: .copyMe}:clipboard: | + | `ap-southeast-2` | 
[arn:aws:lambda:ap-southeast-2:017000801446:layer:AWSLambdaPowertoolsPython:21](#){: .copyMe}:clipboard: | + | `eu-central-1` | [arn:aws:lambda:eu-central-1:017000801446:layer:AWSLambdaPowertoolsPython:21](#){: .copyMe}:clipboard: | + | `eu-west-1` | [arn:aws:lambda:eu-west-1:017000801446:layer:AWSLambdaPowertoolsPython:21](#){: .copyMe}:clipboard: | + | `eu-west-2` | [arn:aws:lambda:eu-west-2:017000801446:layer:AWSLambdaPowertoolsPython:21](#){: .copyMe}:clipboard: | + | `eu-west-3` | [arn:aws:lambda:eu-west-3:017000801446:layer:AWSLambdaPowertoolsPython:21](#){: .copyMe}:clipboard: | + | `eu-north-1` | [arn:aws:lambda:eu-north-1:017000801446:layer:AWSLambdaPowertoolsPython:21](#){: .copyMe}:clipboard: | + | `ca-central-1` | [arn:aws:lambda:ca-central-1:017000801446:layer:AWSLambdaPowertoolsPython:21](#){: .copyMe}:clipboard: | + | `sa-east-1` | [arn:aws:lambda:sa-east-1:017000801446:layer:AWSLambdaPowertoolsPython:21](#){: .copyMe}:clipboard: | ??? question "Can't find our Lambda Layer for your preferred AWS region?" You can use [Serverless Application Repository (SAR)](#sar) method, our [CDK Layer Construct](https://github.com/aws-samples/cdk-lambda-powertools-python-layer){target="_blank"}, or PyPi like you normally would for any other library. @@ -63,7 +63,7 @@ You can include Lambda Powertools Lambda Layer using [AWS Lambda Console](https: Type: AWS::Serverless::Function Properties: Layers: - - !Sub arn:aws:lambda:${AWS::Region}:017000801446:layer:AWSLambdaPowertoolsPython:20 + - !Sub arn:aws:lambda:${AWS::Region}:017000801446:layer:AWSLambdaPowertoolsPython:21 ``` === "Serverless framework" @@ -73,7 +73,7 @@ You can include Lambda Powertools Lambda Layer using [AWS Lambda Console](https: hello: handler: lambda_function.lambda_handler layers: - - arn:aws:lambda:${aws:region}:017000801446:layer:AWSLambdaPowertoolsPython:20 + - arn:aws:lambda:${aws:region}:017000801446:layer:AWSLambdaPowertoolsPython:21 ``` === "CDK" @@ -89,7 +89,7 @@ You can include Lambda Powertools Lambda Layer using [AWS Lambda Console](https: powertools_layer = aws_lambda.LayerVersion.from_layer_version_arn( self, id="lambda-powertools", - layer_version_arn=f"arn:aws:lambda:{env.region}:017000801446:layer:AWSLambdaPowertoolsPython:20" + layer_version_arn=f"arn:aws:lambda:{env.region}:017000801446:layer:AWSLambdaPowertoolsPython:21" ) aws_lambda.Function(self, 'sample-app-lambda', @@ -138,7 +138,7 @@ You can include Lambda Powertools Lambda Layer using [AWS Lambda Console](https: role = aws_iam_role.iam_for_lambda.arn handler = "index.test" runtime = "python3.9" - layers = ["arn:aws:lambda:{region}:017000801446:layer:AWSLambdaPowertoolsPython:20"] + layers = ["arn:aws:lambda:{region}:017000801446:layer:AWSLambdaPowertoolsPython:21"] source_code_hash = filebase64sha256("lambda_function_payload.zip") } @@ -157,7 +157,7 @@ You can include Lambda Powertools Lambda Layer using [AWS Lambda Console](https: ? Do you want to configure advanced settings? Yes ... ? Do you want to enable Lambda layers for this function? Yes - ? Enter up to 5 existing Lambda layer ARNs (comma-separated): arn:aws:lambda:eu-central-1:017000801446:layer:AWSLambdaPowertoolsPython:20 + ? Enter up to 5 existing Lambda layer ARNs (comma-separated): arn:aws:lambda:eu-central-1:017000801446:layer:AWSLambdaPowertoolsPython:21 ❯ amplify push -y @@ -168,7 +168,7 @@ You can include Lambda Powertools Lambda Layer using [AWS Lambda Console](https: - Name: ? Which setting do you want to update? Lambda layers configuration ? 
Do you want to enable Lambda layers for this function? Yes - ? Enter up to 5 existing Lambda layer ARNs (comma-separated): arn:aws:lambda:eu-central-1:017000801446:layer:AWSLambdaPowertoolsPython:20 + ? Enter up to 5 existing Lambda layer ARNs (comma-separated): arn:aws:lambda:eu-central-1:017000801446:layer:AWSLambdaPowertoolsPython:21 ? Do you want to edit the local lambda function now? No ``` @@ -176,7 +176,7 @@ You can include Lambda Powertools Lambda Layer using [AWS Lambda Console](https: Change {region} to your AWS region, e.g. `eu-west-1` ```bash title="AWS CLI" - aws lambda get-layer-version-by-arn --arn arn:aws:lambda:{region}:017000801446:layer:AWSLambdaPowertoolsPython:20 --region {region} + aws lambda get-layer-version-by-arn --arn arn:aws:lambda:{region}:017000801446:layer:AWSLambdaPowertoolsPython:21 --region {region} ``` The pre-signed URL to download this Lambda Layer will be within `Location` key. @@ -187,17 +187,16 @@ You can include Lambda Powertools Lambda Layer using [AWS Lambda Console](https: Lambda Powertools Lambda Layer do not include `pydantic` library - required dependency for the `parser` utility. See [SAR](#sar) option instead. - #### SAR Serverless Application Repository (SAR) App deploys a CloudFormation stack with a copy of our Lambda Layer in your AWS account and region. Despite having more steps compared to the [public Layer ARN](#lambda-layer) option, the benefit is that you can specify a semantic version you want to use. -| App | ARN | Description -|----------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- -| [aws-lambda-powertools-python-layer](https://serverlessrepo.aws.amazon.com/applications/eu-west-1/057560766410/aws-lambda-powertools-python-layer) | [arn:aws:serverlessrepo:eu-west-1:057560766410:applications/aws-lambda-powertools-python-layer](#){: .copyMe}:clipboard: | Core dependencies only; sufficient for nearly all utilities. -| [aws-lambda-powertools-python-layer-extras](https://serverlessrepo.aws.amazon.com/applications/eu-west-1/057560766410/aws-lambda-powertools-python-layer-extras) | [arn:aws:serverlessrepo:eu-west-1:057560766410:applications/aws-lambda-powertools-python-layer-extras](#){: .copyMe}:clipboard: | Core plus extra dependencies such as `pydantic` that is required by `parser` utility. +| App | ARN | Description | +| ---------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------- | +| [aws-lambda-powertools-python-layer](https://serverlessrepo.aws.amazon.com/applications/eu-west-1/057560766410/aws-lambda-powertools-python-layer) | [arn:aws:serverlessrepo:eu-west-1:057560766410:applications/aws-lambda-powertools-python-layer](#){: .copyMe}:clipboard: | Core dependencies only; sufficient for nearly all utilities. 
| +| [aws-lambda-powertools-python-layer-extras](https://serverlessrepo.aws.amazon.com/applications/eu-west-1/057560766410/aws-lambda-powertools-python-layer-extras) | [arn:aws:serverlessrepo:eu-west-1:057560766410:applications/aws-lambda-powertools-python-layer-extras](#){: .copyMe}:clipboard: | Core plus extra dependencies such as `pydantic` that is required by `parser` utility. | ???+ warning **Layer-extras** does not support Python 3.6 runtime. This layer also includes all extra dependencies: `22.4MB zipped`, `~155MB unzipped`. @@ -205,7 +204,6 @@ Despite having more steps compared to the [public Layer ARN](#lambda-layer) opti ???+ tip You can create a shared Lambda Layers stack and make this along with other account level layers stack. - If using SAM, you can include this SAR App as part of your shared Layers stack, and lock to a specific semantic version. Once deployed, it'll be available across the account this is deployed to. === "SAM" @@ -414,41 +412,41 @@ sam init --location https://github.com/aws-samples/cookiecutter-aws-sam-python Core utilities such as Tracing, Logging, Metrics, and Event Handler will be available across all Lambda Powertools languages. Additional utilities are subjective to each language ecosystem and customer demand. -| Utility | Description -| ------------------------------------------------- | --------------------------------------------------------------------------------- -[Tracing](./core/tracer.md) | Decorators and utilities to trace Lambda function handlers, and both synchronous and asynchronous functions -[Logger](./core/logger.md) | Structured logging made easier, and decorator to enrich structured logging with key Lambda context details -[Metrics](./core/metrics.md) | Custom Metrics created asynchronously via CloudWatch Embedded Metric Format (EMF) -[Event handler: AppSync](./core/event_handler/appsync.md) | AppSync event handler for Lambda Direct Resolver and Amplify GraphQL Transformer function -[Event handler: API Gateway and ALB](https://awslabs.github.io/aws-lambda-powertools-python/latest/core/event_handler/api_gateway/) | Amazon API Gateway REST/HTTP API and ALB event handler for Lambda functions invoked using Proxy integration -[Middleware factory](./utilities/middleware_factory.md) | Decorator factory to create your own middleware to run logic before, and after each Lambda invocation -[Parameters](./utilities/parameters.md) | Retrieve parameter values from AWS Systems Manager Parameter Store, AWS Secrets Manager, or Amazon DynamoDB, and cache them for a specific amount of time -[Batch processing](./utilities/batch.md) | Handle partial failures for AWS SQS batch processing -[Typing](./utilities/typing.md) | Static typing classes to speedup development in your IDE -[Validation](./utilities/validation.md) | JSON Schema validator for inbound events and responses -[Event source data classes](./utilities/data_classes.md) | Data classes describing the schema of common Lambda event triggers -[Parser](./utilities/parser.md) | Data parsing and deep validation using Pydantic -[Idempotency](./utilities/idempotency.md) | Idempotent Lambda handler -[Feature Flags](./utilities/feature_flags.md) | A simple rule engine to evaluate when one or multiple features should be enabled depending on the input +| Utility | Description | +| ----------------------------------------------------------------------------------------------------------------------------------- | 
--------------------------------------------------------------------------------------------------------------------------------------------------------- | +| [Tracing](./core/tracer.md) | Decorators and utilities to trace Lambda function handlers, and both synchronous and asynchronous functions | +| [Logger](./core/logger.md) | Structured logging made easier, and decorator to enrich structured logging with key Lambda context details | +| [Metrics](./core/metrics.md) | Custom Metrics created asynchronously via CloudWatch Embedded Metric Format (EMF) | +| [Event handler: AppSync](./core/event_handler/appsync.md) | AppSync event handler for Lambda Direct Resolver and Amplify GraphQL Transformer function | +| [Event handler: API Gateway and ALB](https://awslabs.github.io/aws-lambda-powertools-python/latest/core/event_handler/api_gateway/) | Amazon API Gateway REST/HTTP API and ALB event handler for Lambda functions invoked using Proxy integration | +| [Middleware factory](./utilities/middleware_factory.md) | Decorator factory to create your own middleware to run logic before, and after each Lambda invocation | +| [Parameters](./utilities/parameters.md) | Retrieve parameter values from AWS Systems Manager Parameter Store, AWS Secrets Manager, or Amazon DynamoDB, and cache them for a specific amount of time | +| [Batch processing](./utilities/batch.md) | Handle partial failures for AWS SQS batch processing | +| [Typing](./utilities/typing.md) | Static typing classes to speedup development in your IDE | +| [Validation](./utilities/validation.md) | JSON Schema validator for inbound events and responses | +| [Event source data classes](./utilities/data_classes.md) | Data classes describing the schema of common Lambda event triggers | +| [Parser](./utilities/parser.md) | Data parsing and deep validation using Pydantic | +| [Idempotency](./utilities/idempotency.md) | Idempotent Lambda handler | +| [Feature Flags](./utilities/feature_flags.md) | A simple rule engine to evaluate when one or multiple features should be enabled depending on the input | ## Environment variables ???+ info Explicit parameters take precedence over environment variables -| Environment variable | Description | Utility | Default | -| ------------------------------------------------- | --------------------------------------------------------------------------------- | --------------------------------------------------------------------------------- | ------------------------------------------------- | -| **POWERTOOLS_SERVICE_NAME** | Sets service name used for tracing namespace, metrics dimension and structured logging | All | `"service_undefined"` | -| **POWERTOOLS_METRICS_NAMESPACE** | Sets namespace used for metrics | [Metrics](./core/metrics) | `None` | -| **POWERTOOLS_TRACE_DISABLED** | Explicitly disables tracing | [Tracing](./core/tracer) | `false` | -| **POWERTOOLS_TRACER_CAPTURE_RESPONSE** | Captures Lambda or method return as metadata. | [Tracing](./core/tracer) | `true` | -| **POWERTOOLS_TRACER_CAPTURE_ERROR** | Captures Lambda or method exception as metadata. 
| [Tracing](./core/tracer) | `true` | -| **POWERTOOLS_TRACE_MIDDLEWARES** | Creates sub-segment for each custom middleware | [Middleware factory](./utilities/middleware_factory) | `false` | -| **POWERTOOLS_LOGGER_LOG_EVENT** | Logs incoming event | [Logging](./core/logger) | `false` | -| **POWERTOOLS_LOGGER_SAMPLE_RATE** | Debug log sampling | [Logging](./core/logger) | `0` | -| **POWERTOOLS_LOG_DEDUPLICATION_DISABLED** | Disables log deduplication filter protection to use Pytest Live Log feature | [Logging](./core/logger) | `false` | -| **POWERTOOLS_EVENT_HANDLER_DEBUG** | Enables debugging mode for event handler | [Event Handler](./core/event_handler/api_gateway.md#debug-mode) | `false` | -| **LOG_LEVEL** | Sets logging level | [Logging](./core/logger) | `INFO` | +| Environment variable | Description | Utility | Default | +| ----------------------------------------- | -------------------------------------------------------------------------------------- | --------------------------------------------------------------- | --------------------- | +| **POWERTOOLS_SERVICE_NAME** | Sets service name used for tracing namespace, metrics dimension and structured logging | All | `"service_undefined"` | +| **POWERTOOLS_METRICS_NAMESPACE** | Sets namespace used for metrics | [Metrics](./core/metrics) | `None` | +| **POWERTOOLS_TRACE_DISABLED** | Explicitly disables tracing | [Tracing](./core/tracer) | `false` | +| **POWERTOOLS_TRACER_CAPTURE_RESPONSE** | Captures Lambda or method return as metadata. | [Tracing](./core/tracer) | `true` | +| **POWERTOOLS_TRACER_CAPTURE_ERROR** | Captures Lambda or method exception as metadata. | [Tracing](./core/tracer) | `true` | +| **POWERTOOLS_TRACE_MIDDLEWARES** | Creates sub-segment for each custom middleware | [Middleware factory](./utilities/middleware_factory) | `false` | +| **POWERTOOLS_LOGGER_LOG_EVENT** | Logs incoming event | [Logging](./core/logger) | `false` | +| **POWERTOOLS_LOGGER_SAMPLE_RATE** | Debug log sampling | [Logging](./core/logger) | `0` | +| **POWERTOOLS_LOG_DEDUPLICATION_DISABLED** | Disables log deduplication filter protection to use Pytest Live Log feature | [Logging](./core/logger) | `false` | +| **POWERTOOLS_EVENT_HANDLER_DEBUG** | Enables debugging mode for event handler | [Event Handler](./core/event_handler/api_gateway.md#debug-mode) | `false` | +| **LOG_LEVEL** | Sets logging level | [Logging](./core/logger) | `INFO` | ## Debug mode @@ -460,7 +458,7 @@ from aws_lambda_powertools.logging.logger import set_package_logger set_package_logger() # (1) ``` -1. :information_source: this will configure our `aws_lambda_powertools` logger with debug. +1. :information_source: this will configure our `aws_lambda_powertools` logger with debug. ## Tenets diff --git a/docs/roadmap.md b/docs/roadmap.md index 231a63927ec..bc0cfb55be2 100644 --- a/docs/roadmap.md +++ b/docs/roadmap.md @@ -4,7 +4,6 @@ This is our public roadmap that outlines the high level direction we are working towards, namely [Themes](#themes). We update this document when our priorities change: security and stability is our top priority. 
-
[See our latest list of activities »](https://github.com/orgs/awslabs/projects/51/views/1?query=is%3Aopen+sort%3Aupdated-desc){target="_blank"}

## Themes

@@ -25,7 +24,9 @@ We will remove support for Python 3.6 after July 18th, following AWS Lambda [dep

### Reduce release operational overhead

-We are working on a consistent label and automation strategy across all Lambda Powertools projects ([Java](https://awslabs.github.io/aws-lambda-powertools-java/){target="_blank"}, [TypeScript](https://awslabs.github.io/aws-lambda-powertools-typescript/latest/){target="_blank"}). This will be our baseline to automate areas where we don't need human intervention, and reduce our manual effort to areas where clear communication is crucial.
+We are working on a consistent label and automation strategy across all Lambda Powertools projects ([Java](https://awslabs.github.io/aws-lambda-powertools-java/){target="_blank"}, [TypeScript](https://awslabs.github.io/aws-lambda-powertools-typescript/latest/){target="_blank"}).
+
+This will be our baseline to automate areas where we don't need human intervention, and focus our manual effort on areas where clear communication is crucial.

### Revamp roadmap

@@ -71,8 +72,10 @@ graph LR

Our end-to-end mechanism follows four major steps:

-* **Feature Request**. Ideas start with a [feature request issue template](https://github.com/awslabs/aws-lambda-powertools-python/issues/new?assignees=&labels=feature-request%2Ctriage&template=feature_request.yml&title=Feature+request%3A+TITLE){target="_blank"} to highlight their use case at a high level. Maintainers review each request based on **(1)** [project tenets](index.md#tenets){target="_blank"}, **(2)** customers reaction (👍) and use cases, and comment whether we'll need a RFC for further discussion before any work begins.
-* **Request-for-comments (RFC)**. Design proposals use our [RFC issue template](https://github.com/awslabs/aws-lambda-powertools-python/issues/new?assignees=&labels=RFC%2Ctriage&template=rfc.yml&title=RFC%3A+TITLE){target="_blank"} to describe its implementation, challenges, developer experience, dependencies, and alternative solutions. This helps refine the initial idea with community feedback before a decision is made.
+* **Feature Request**. Ideas start with a [feature request](https://github.com/awslabs/aws-lambda-powertools-python/issues/new?assignees=&labels=feature-request%2Ctriage&template=feature_request.yml&title=Feature+request%3A+TITLE){target="_blank"} to outline their use case at a high level. For complex use cases, maintainers might ask for/write an RFC.
+    * Maintainers review requests based on [project tenets](index.md#tenets){target="_blank"}, customer reactions (👍), and use cases.
+* **Request-for-comments (RFC)**. Design proposals use our [RFC issue template](https://github.com/awslabs/aws-lambda-powertools-python/issues/new?assignees=&labels=RFC%2Ctriage&template=rfc.yml&title=RFC%3A+TITLE){target="_blank"} to describe its implementation, challenges, developer experience, dependencies, and alternative solutions.
+    * This helps refine the initial idea with community feedback before a decision is made.
* **Decision**. After carefully reviewing and discussing them, maintainers make a final decision on whether to start implementation, defer or reject it, and update everyone with the next steps.
* **Implementation**. For approved features, maintainers give priority to the original authors for implementation unless it is a sensitive task that is best handled by maintainers.
@@ -84,7 +87,6 @@ The AWS Lambda Powertools team values feedback and guidance from its community o We determine the high-level direction for our open roadmap based on customer feedback and popularity (👍🏽 and comments), security and operational impacts, and business value. Where features don’t meet our goals and longer-term strategy, we will communicate that clearly and openly as quickly as possible with an explanation of why the decision was made. - ## FAQs **Q: Why did you build this?** diff --git a/docs/tutorial/index.md b/docs/tutorial/index.md index 5ea8ec7f2fa..e6f7cbfed29 100644 --- a/docs/tutorial/index.md +++ b/docs/tutorial/index.md @@ -82,6 +82,7 @@ When API Gateway receives a HTTP GET request on `/hello` route, Lambda will call ???+ warning For simplicity, we do not set up authentication and authorization! You can find more information on how to implement it on [AWS SAM documentation](https://docs.aws.amazon.com/serverless-application-model/latest/developerguide/serverless-controlling-access-to-apis.html){target="_blank"}. + ### Run your code At each point, you have two ways to run your code: locally and within your AWS account. @@ -106,7 +107,6 @@ As a result, a local API endpoint will be exposed and you can invoke it using yo ???+ info To learn more about local testing, please visit the [AWS SAM CLI local testing](https://docs.aws.amazon.com/serverless-application-model/latest/developerguide/sam-cli-command-reference-sam-local-start-api.html) documentation. - #### Live test First, you need to deploy your application into your AWS Account by issuing `sam build && sam deploy --guided` command. This command builds a ZIP package of your source code, and deploy it to your AWS Account. @@ -357,9 +357,9 @@ Lastly, we used `return app.resolve(event, context)` so Event Handler can resolv From here, we could handle [404 routes](../core/event_handler/api_gateway.md#handling-not-found-routes){target="_blank"}, [error handling](../core/event_handler/api_gateway.md#exception-handling){target="_blank"}, [access query strings, payload](../core/event_handler/api_gateway.md#accessing-request-details){target="_blank"}, etc. - ???+ tip If you'd like to learn how python decorators work under the hood, you can follow [Real Python](https://realpython.com/primer-on-python-decorators/)'s article. + ## Structured Logging Over time, you realize that searching logs as text results in poor observability, it's hard to create metrics from, enumerate common exceptions, etc. @@ -443,7 +443,6 @@ So far, so good! We can take a step further now by adding additional context to We could start by creating a dictionary with Lambda context information or something from the incoming event, which should always be logged. Additional attributes could be added on every `logger.info` using `extra` keyword like in any standard Python logger. - ### Simplifying with Logger ???+ question "Surely this could be easier, right?" @@ -485,7 +484,6 @@ Let's break this down: * **L22**: We also instruct Logger to use the incoming API Gateway Request ID as a [correlation id](../core/logger.md##set_correlation_id-method) automatically. * **L22**: Since we're in dev, we also use `log_event=True` to automatically log each incoming request for debugging. This can be also set via [environment variables](./index.md#environment-variables){target="_blank"}. 
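The two **L22** bullets above map to Logger's `inject_lambda_context` decorator. A minimal sketch of that pattern, assuming an API Gateway REST event (the service name and handler body are illustrative):

```python
from aws_lambda_powertools import Logger
from aws_lambda_powertools.logging import correlation_paths

logger = Logger(service="APP")


# pick up the API Gateway request ID as correlation id, and log each incoming event
@logger.inject_lambda_context(correlation_id_path=correlation_paths.API_GATEWAY_REST, log_event=True)
def lambda_handler(event, context):
    logger.info("Request received")
    return {"statusCode": 200}
```

Note that `log_event=True` logs the full incoming event on every invocation, which is handy in development but is usually disabled in production to avoid logging sensitive request data.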
- This is how the logs would look like now: ```json title="Our logs are now structured consistently" @@ -707,7 +705,9 @@ Let's break it down: ???+ info If you want to understand how the Lambda execution environment (sandbox) works and why cold starts can occur, see this [blog series on Lambda performance](https://aws.amazon.com/blogs/compute/operating-lambda-performance-optimization-part-1/). -Repeat the process of building, deploying, and invoking your application via the API endpoint. Within the [AWS X-Ray Console](https://console.aws.amazon.com/xray/home#/traces/){target="_blank"}, you should now be able to group traces by the `User` and `ColdStart` annotation. +Repeat the process of building, deploying, and invoking your application via the API endpoint. + +Within the [AWS X-Ray Console](https://console.aws.amazon.com/xray/home#/traces/){target="_blank"}, you should now be able to group traces by the `User` and `ColdStart` annotation. ![Filtering traces by annotations](../media/tracer_xray_sdk_enriched.png) @@ -772,7 +772,6 @@ Lambda Powertools optimizes for Lambda compute environment. As such, we add thes Repeat the process of building, deploying, and invoking your application via the API endpoint. Within the [AWS X-Ray Console](https://console.aws.amazon.com/xray/home#/traces/){target="_blank"}, you should see a similar view: - ![AWS X-Ray Console trace view using Lambda Powertools Tracer](../media/tracer_utility_showcase_2.png) ???+ tip @@ -791,7 +790,7 @@ From here, you can browse to specific logs in CloudWatch Logs Insight, Metrics D Let's add custom metrics to better understand our application and business behavior (e.g. number of reservations, etc.). -Out of the box, AWS Lambda adds [invocation, performance, and concurrency metrics](https://docs.aws.amazon.com/lambda/latest/dg/monitoring-metrics.html#monitoring-metrics-types){target="_blank"}. Amazon API Gateway also adds [general metrics at the aggregate level](https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-metrics-and-dimensions.html#api-gateway-metrics) such as latency, number of requests received, etc. +By default, AWS Lambda adds [invocation and performance metrics](https://docs.aws.amazon.com/lambda/latest/dg/monitoring-metrics.html#monitoring-metrics-types){target="_blank"}, and Amazon API Gateway adds [latency and some HTTP metrics](https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-metrics-and-dimensions.html#api-gateway-metrics). ???+ tip You can [optionally enable detailed metrics](https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-metrics-and-dimensions.html#api-gateway-metricdimensions){target="_blank"} per each API route, stage, and method in API Gateway. @@ -915,7 +914,8 @@ There's a lot going on, let's break this down: * **L10**: We define a container where all of our application metrics will live `MyApp`, a.k.a [Metrics Namespace](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch_concepts.html){target="_blank"}. * **L14**: We initialize a CloudWatch client to send metrics later. -* **L19-47**: We create a custom function to prepare and send `ColdStart` and `SuccessfulGreetings` metrics using CloudWatch expected data structure. We also set [dimensions](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch_concepts.html#Dimension){target="_blank"} of these metrics - Think of them as metadata to define to slice and dice them later; an unique metric is a combination of metric name + metric dimension(s). 
+* **L19-47**: We create a custom function to prepare and send `ColdStart` and `SuccessfulGreetings` metrics using CloudWatch expected data structure. We also set [dimensions](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch_concepts.html#Dimension){target="_blank"} of these metrics.
+    * Think of them as metadata that lets you slice and dice metrics later; a unique metric is a combination of metric name + metric dimension(s).
* **L55,64**: We call our custom function to create metrics for every greeting received.

???+ question
@@ -988,7 +988,9 @@ That's a lot less boilerplate code! Let's break this down:

* **L33**: We use `@metrics.log_metrics` decorator to ensure that our metrics are aligned with the EMF output and validated before-hand, like in case we forget to set namespace, or accidentally use a metric unit as a string that doesn't exist in CloudWatch.
* **L33**: We also use `capture_cold_start_metric=True` so we don't have to handle that logic either. Note that [Metrics](../core/metrics.md){target="_blank"} does not publish a warm invocation metric (ColdStart=0) for cost reasons. As such, treat the absence (sparse metric) as a non-cold start invocation.

-Repeat the process of building, deploying, and invoking your application via the API endpoint a few times to generate metrics - [Artillery](https://www.artillery.io/){target="_blank"} and [K6.io](https://k6.io/open-source){target="_blank"} are quick ways to generate some load. Within [CloudWatch Metrics view](https://console.aws.amazon.com/cloudwatch/home#metricsV2:graph=~()){target="_blank}, you should see `MyApp` custom namespace with your custom metrics there and `SuccessfulGreetings` available to graph.
+Repeat the process of building, deploying, and invoking your application via the API endpoint a few times to generate metrics - [Artillery](https://www.artillery.io/){target="_blank"} and [K6.io](https://k6.io/open-source){target="_blank"} are quick ways to generate some load.
+
+Within [CloudWatch Metrics view](https://console.aws.amazon.com/cloudwatch/home#metricsV2:graph=~()){target="_blank"}, you should see the `MyApp` custom namespace with your custom metrics, and `SuccessfulGreetings` available to graph.

![Custom Metrics Example](../media/metrics_utility_showcase.png)

@@ -1024,7 +1026,7 @@ If you're curious about how the EMF portion of your function logs look like, you
}
```

-# Final considerations
+## Final considerations

We covered a lot of ground here and we only scratched the surface of the feature set available within Lambda Powertools.

@@ -1038,4 +1040,4 @@ This requires a change in mindset to ensure operational excellence is part of th

Lambda Powertools is largely designed to make some of these practices easier to adopt from day 1.

???+ question "Have ideas for other tutorials?"
-    You can open up a [documentation issue](https://github.com/awslabs/aws-lambda-powertools-python/issues/new?assignees=&labels=documentation&template=documentation-improvements.md&title=Tutorial%20Suggestion){target="_blank"}, or connect with us on the [AWS Developers Slack](https://github.com/awslabs/aws-lambda-powertools-python/#connect) at `lambda-powertools` channel, or via e-mail [aws-lambda-powertools-feedback@amazon.com](mailto:aws-lambda-powertools-feedback@amazon.com).
+    You can open up a [documentation issue](https://github.com/awslabs/aws-lambda-powertools-python/issues/new?assignees=&labels=documentation&template=documentation-improvements.md&title=Tutorial%20Suggestion){target="_blank"}, or reach out to us via e-mail [aws-lambda-powertools-feedback@amazon.com](mailto:aws-lambda-powertools-feedback@amazon.com).
diff --git a/docs/utilities/batch.md b/docs/utilities/batch.md
index 14dc80bdb11..ce2e76e25d4 100644
--- a/docs/utilities/batch.md
+++ b/docs/utilities/batch.md
@@ -20,9 +20,11 @@ If your function fails to process any message from the batch, the entire batch r

With this utility, batch records are processed individually – only messages that failed to be processed return to the queue or stream for a further retry. This works when two mechanisms are in place:

-1. `ReportBatchItemFailures` is set in your SQS, Kinesis, or DynamoDB event source properties
-2. [A specific response](https://docs.aws.amazon.com/lambda/latest/dg/with-sqs.html#sqs-batchfailurereporting-syntax){target="_blank"} is returned so Lambda knows which records should not be deleted during partial responses
+1. `ReportBatchItemFailures` is set in your SQS, Kinesis, or DynamoDB event source properties
+2. [A specific response](https://docs.aws.amazon.com/lambda/latest/dg/with-sqs.html#sqs-batchfailurereporting-syntax){target="_blank"} is returned so Lambda knows which records should not be deleted during partial responses
+
+
???+ warning "Warning: This utility lowers the chance of processing records more than once; it does not guarantee it"
    We recommend implementing processing logic in an [idempotent manner](idempotency.md){target="_blank"} wherever possible.

@@ -38,7 +40,6 @@ You do not need any additional IAM permissions to use this utility, except for w

The remaining sections of the documentation will rely on these samples. For completeness, this demonstrates IAM permissions and a Dead Letter Queue where batch records will be sent after 2 retries have been attempted.

-
=== "SQS"

    ```yaml title="template.yaml" hl_lines="31-32"
@@ -220,10 +221,10 @@ The remaining sections of the documentation will rely on these samp

Processing batches from SQS works in four stages:

-1. Instantiate **`BatchProcessor`** and choose **`EventType.SQS`** for the event type
-2. Define your function to handle each batch record, and use [`SQSRecord`](data_classes.md#sqs){target="_blank"} type annotation for autocompletion
-3. Use either **`batch_processor`** decorator or your instantiated processor as a context manager to kick off processing
-4. Return the appropriate response contract to Lambda via **`.response()`** processor method
+1. Instantiate **`BatchProcessor`** and choose **`EventType.SQS`** for the event type
+2. Define your function to handle each batch record, and use [`SQSRecord`](data_classes.md#sqs){target="_blank"} type annotation for autocompletion
+3. Use either **`batch_processor`** decorator or your instantiated processor as a context manager to kick off processing
+4. Return the appropriate response contract to Lambda via **`.response()`** processor method

???+ info
    This code example optionally uses Tracer and Logger for completeness.

@@ -350,10 +351,10 @@ Processing batches from SQS works in four stages:

Processing batches from Kinesis works in four stages:

-1. Instantiate **`BatchProcessor`** and choose **`EventType.KinesisDataStreams`** for the event type
-2. Define your function to handle each batch record, and use [`KinesisStreamRecord`](data_classes.md#kinesis-streams){target="_blank"} type annotation for autocompletion
-3. Use either **`batch_processor`** decorator or your instantiated processor as a context manager to kick off processing
-4. Return the appropriate response contract to Lambda via **`.response()`** processor method
+1. Instantiate **`BatchProcessor`** and choose **`EventType.KinesisDataStreams`** for the event type
+2. Define your function to handle each batch record, and use [`KinesisStreamRecord`](data_classes.md#kinesis-streams){target="_blank"} type annotation for autocompletion
+3. Use either **`batch_processor`** decorator or your instantiated processor as a context manager to kick off processing
+4. Return the appropriate response contract to Lambda via **`.response()`** processor method

???+ info
    This code example optionally uses Tracer and Logger for completeness.

@@ -433,7 +434,6 @@ Processing batches from Kinesis works in four stages:
 }
 ```

-
=== "Sample event"

    ```json
@@ -475,15 +475,14 @@ Processing batches from Kinesis works in four stages:
 }
 ```

-
### Processing messages from DynamoDB

Processing batches from DynamoDB works in four stages:

-1. Instantiate **`BatchProcessor`** and choose **`EventType.DynamoDBStreams`** for the event type
-2. Define your function to handle each batch record, and use [`DynamoDBRecord`](data_classes.md#dynamodb-streams){target="_blank"} type annotation for autocompletion
-3. Use either **`batch_processor`** decorator or your instantiated processor as a context manager to kick off processing
-4. Return the appropriate response contract to Lambda via **`.response()`** processor method
+1. Instantiate **`BatchProcessor`** and choose **`EventType.DynamoDBStreams`** for the event type
+2. Define your function to handle each batch record, and use [`DynamoDBRecord`](data_classes.md#dynamodb-streams){target="_blank"} type annotation for autocompletion
+3. Use either **`batch_processor`** decorator or your instantiated processor as a context manager to kick off processing
+4. Return the appropriate response contract to Lambda via **`.response()`** processor method

???+ info
    This code example optionally uses Tracer and Logger for completeness.

@@ -569,7 +568,6 @@ Processing batches from Kinesis works in four stages:
 }
 ```

-
=== "Sample event"

    ```json
@@ -638,7 +636,6 @@ All records in the batch will be passed to this handler for processing, even if

All processing logic will and should be performed by the `record_handler` function.

-
## Advanced

### Pydantic integration
@@ -647,7 +644,6 @@ You can bring your own Pydantic models via **`model`** parameter when inheriting

Inheritance is important because we need to access message IDs and sequence numbers from these records in the event of failure. Mypy is fully integrated with this utility, so it should identify whether you're passing an incorrect model.

-
=== "SQS"

    ```python hl_lines="5 9-10 12-19 21 27"
@@ -789,7 +785,6 @@ Use the context manager to access a list of all returned values from your `recor

* **When successful**. We will include a tuple with `success`, the result of `record_handler`, and the batch record
* **When failed**. 
We will include a tuple with `fail`, exception as a string, and the batch record - ```python hl_lines="31-38" title="Accessing processed messages via context manager" import json @@ -833,7 +828,6 @@ def lambda_handler(event, context: LambdaContext): return processor.response() ``` - ### Extending BatchProcessor You might want to bring custom logic to the existing `BatchProcessor` to slightly override how we handle successes and failures. @@ -958,7 +952,6 @@ When using Tracer to capture responses for each batch record processing, you mig If that's the case, you can configure [Tracer to disable response auto-capturing](../core/tracer.md#disabling-response-auto-capture){target="_blank"}. - ```python hl_lines="14" title="Disabling Tracer response auto-capturing" import json @@ -1123,8 +1116,6 @@ Given a SQS batch where the first batch record succeeds and the second fails pro } ``` - - ## FAQ ### Choosing between decorator and context manager @@ -1150,13 +1141,11 @@ class MyProcessor(BatchProcessor): return super().failure_handler(record, exception) ``` - ## Legacy ???+ tip This is kept for historical purposes. Use the new [BatchProcessor](#processing-messages-from-sqs) instead. - ### Migration guide ???+ info @@ -1175,7 +1164,6 @@ You can migrate in three steps: 2. If you were using **`PartialSQSProcessor`** you can now use **`BatchProcessor`** 3. Change your Lambda Handler to return the new response format - === "Decorator: Before" ```python hl_lines="1 6" @@ -1207,7 +1195,6 @@ You can migrate in three steps: return processor.response() ``` - === "Context manager: Before" ```python hl_lines="1-2 4 14 19" diff --git a/docs/utilities/feature_flags.md b/docs/utilities/feature_flags.md index 95efc5d051c..1d586d9377d 100644 --- a/docs/utilities/feature_flags.md +++ b/docs/utilities/feature_flags.md @@ -387,7 +387,6 @@ You can use `get_enabled_features` method for scenarios where you need a list of Feature flags can return any JSON values when `boolean_type` parameter is set to `false`. These can be dictionaries, list, string, integers, etc. - === "app.py" ```python hl_lines="3 9 13 16 18" @@ -593,7 +592,6 @@ Action | Equivalent expression **VALUE_IN_KEY** | `lambda a, b: b in a` **VALUE_NOT_IN_KEY** | `lambda a, b: b not in a` - ???+ info The `**key**` and `**value**` will be compared to the input from the `**context**` parameter. @@ -655,7 +653,6 @@ For this to work, you need to use a JMESPath expression via the `envelope` param } ``` - ### Built-in store provider ???+ info @@ -678,7 +675,6 @@ Parameter | Default | Description **jmespath_options** | `None` | For advanced use cases when you want to bring your own [JMESPath functions](https://github.com/jmespath/jmespath.py#custom-functions){target="_blank"} **logger** | `logging.Logger` | Logger to use for debug. You can optionally supply an instance of Powertools Logger. - ```python hl_lines="21-27" title="AppConfigStore sample" from botocore.config import Config @@ -778,7 +774,6 @@ Method | When to use | Requires new deployment on changes | Supported services **[Parameters utility](parameters.md)** | Access to secrets, or fetch parameters in different formats from AWS System Manager Parameter Store or Amazon DynamoDB. | No | Parameter Store, DynamoDB, Secrets Manager, AppConfig **Feature flags utility** | Rule engine to define when one or multiple features should be enabled depending on the input. 
| No | AppConfig - ## Deprecation list when GA Breaking change | Recommendation diff --git a/docs/utilities/idempotency.md b/docs/utilities/idempotency.md index 4b03b66abd4..a5ed14b9150 100644 --- a/docs/utilities/idempotency.md +++ b/docs/utilities/idempotency.md @@ -350,7 +350,6 @@ Imagine the function executes successfully, but the client never receives the re } ``` - ### Idempotency request flow This sequence diagram shows an example flow of what happens in the payment scenario: @@ -367,7 +366,6 @@ The client was successful in receiving the result after the retry. Since the Lam If you are using the `idempotent` decorator on your Lambda handler, any unhandled exceptions that are raised during the code execution will cause **the record in the persistence layer to be deleted**. This means that new invocations will execute your code again despite having the same payload. If you don't want the record to be deleted, you need to catch exceptions within the idempotent function and return a successful response. - ![Idempotent sequence exception](../media/idempotent_sequence_exception.png) If you are using `idempotent_function`, any unhandled exceptions that are raised _inside_ the decorated function will cause the record in the persistence layer to be deleted, and allow the function to be executed again if retried. @@ -886,12 +884,12 @@ def lambda_handler(event, context): ???+ tip "Tip: JMESPath Powertools functions are also available" Built-in functions known in the validation utility like `powertools_json`, `powertools_base64`, `powertools_base64_gzip` are also available to use in this utility. - ## Testing your code The idempotency utility provides several routes to test your code. ### Disabling the idempotency utility + When testing your code, you may wish to disable the idempotency logic altogether and focus on testing your business logic. To do this, you can set the environment variable `POWERTOOLS_IDEMPOTENCY_DISABLED` with a truthy value. If you prefer setting this for specific tests, and are using Pytest, you can use [monkeypatch](https://docs.pytest.org/en/latest/monkeypatch.html) fixture: diff --git a/docs/utilities/jmespath_functions.md b/docs/utilities/jmespath_functions.md index 03b5fce1fd5..eee88c13cfb 100644 --- a/docs/utilities/jmespath_functions.md +++ b/docs/utilities/jmespath_functions.md @@ -107,6 +107,7 @@ Envelope | JMESPath expression ## Advanced ### Built-in JMESPath functions + You can use our built-in JMESPath functions within your expressions to do exactly that to decode JSON Strings, base64, and uncompress gzip data. ???+ info diff --git a/docs/utilities/parameters.md b/docs/utilities/parameters.md index 36990fdd2cb..2559044b632 100644 --- a/docs/utilities/parameters.md +++ b/docs/utilities/parameters.md @@ -3,7 +3,7 @@ title: Parameters description: Utility --- - + The parameters utility provides high-level functions to retrieve one or multiple parameter values from [AWS Systems Manager Parameter Store](https://docs.aws.amazon.com/systems-manager/latest/userguide/systems-manager-parameter-store.html){target="_blank"}, [AWS Secrets Manager](https://aws.amazon.com/secrets-manager/){target="_blank"}, [AWS AppConfig](https://docs.aws.amazon.com/appconfig/latest/userguide/what-is-appconfig.html){target="_blank"}, [Amazon DynamoDB](https://aws.amazon.com/dynamodb/){target="_blank"}, or bring your own. 
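Before diving into the feature set, here is a minimal sketch of the high-level functions in action (the parameter name `/my/app/config` and the handler shape are illustrative, not from the docs):

```python title="Example: retrieving a parameter with the high-level function"
from aws_lambda_powertools.utilities import parameters


def handler(event, context):
    # Fetched from SSM Parameter Store and cached for 5 seconds by default;
    # pass max_age to tune the cache window, e.g. max_age=60
    api_url = parameters.get_parameter("/my/app/config")
    return {"api_url": api_url}
```

By default, values are cached for 5 seconds to reduce API calls; the `max_age` parameter tunes that window.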
## Key features @@ -479,7 +479,6 @@ Here is the mapping between this utility's functions and methods and the underly | DynamoDB | `DynamoDBProvider.get_multiple` | `dynamodb` | ([Table resource](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/dynamodb.html#table)) | [query](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/dynamodb.html#DynamoDB.Table.query) | | App Config | `get_app_config` | `appconfig` | [get_configuration](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/appconfig.html#AppConfig.Client.get_configuration) | - ### Bring your own boto client You can use `boto3_client` parameter via any of the available [Provider Classes](#built-in-provider-class). Some providers expect a low level boto3 client while others expect a high level boto3 client, here is the mapping for each of them: @@ -491,7 +490,6 @@ You can use `boto3_client` parameter via any of the available [Provider Classes] | [AppConfigProvider](#appconfigprovider) | low level | `boto3.client("appconfig")` | | [DynamoDBProvider](#dynamodbprovider) | high level | `boto3.resource("dynamodb")` | - Bringing them together in a single code snippet would look like this: ```python title="Example: passing a custom boto3 client for each provider" @@ -571,7 +569,6 @@ The **`config`** , **`boto3_session`**, and **`boto3_client`** parameters enabl ... ``` - ## Testing your code ### Mocking parameter values @@ -645,14 +642,12 @@ object named `get_parameter_mock`. ``` - ### Clearing cache Parameters utility caches all parameter values for performance and cost reasons. However, this can have unintended interference in tests using the same parameter name. Within your tests, you can use `clear_cache` method available in [every provider](#built-in-provider-class). When using multiple providers or higher level functions like `get_parameter`, use `clear_caches` standalone function to clear cache globally. 
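As a quick illustration of the global reset, a test-wide cleanup could look like this (a minimal sketch; the `autouse` fixture is an assumption on how you'd wire it into your suite, not something the docs prescribe):

```python title="Example: clearing all provider caches between tests"
import pytest

from aws_lambda_powertools.utilities import parameters


@pytest.fixture(autouse=True)
def clear_parameters_cache():
    yield
    # clear every provider cache globally after each test
    parameters.clear_caches()
```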
- === "clear_cache method" ```python hl_lines="9" import pytest diff --git a/docs/utilities/parser.md b/docs/utilities/parser.md index c17e2f173c5..8756725d1e0 100644 --- a/docs/utilities/parser.md +++ b/docs/utilities/parser.md @@ -152,20 +152,20 @@ def my_function(): Parser comes with the following built-in models: -| Model name | Description | -| --------------------------------- | ------------------------------------------------------------------ | -| **DynamoDBStreamModel** | Lambda Event Source payload for Amazon DynamoDB Streams | -| **EventBridgeModel** | Lambda Event Source payload for Amazon EventBridge | -| **SqsModel** | Lambda Event Source payload for Amazon SQS | -| **AlbModel** | Lambda Event Source payload for Amazon Application Load Balancer | -| **CloudwatchLogsModel** | Lambda Event Source payload for Amazon CloudWatch Logs | -| **S3Model** | Lambda Event Source payload for Amazon S3 | -| **S3ObjectLambdaEvent** | Lambda Event Source payload for Amazon S3 Object Lambda | -| **KinesisDataStreamModel** | Lambda Event Source payload for Amazon Kinesis Data Streams | -| **SesModel** | Lambda Event Source payload for Amazon Simple Email Service | -| **SnsModel** | Lambda Event Source payload for Amazon Simple Notification Service | -| **APIGatewayProxyEventModel** | Lambda Event Source payload for Amazon API Gateway | -| **APIGatewayProxyEventV2Model** | Lambda Event Source payload for Amazon API Gateway v2 payload | +| Model name | Description | +| ------------------------------- | ------------------------------------------------------------------ | +| **DynamoDBStreamModel** | Lambda Event Source payload for Amazon DynamoDB Streams | +| **EventBridgeModel** | Lambda Event Source payload for Amazon EventBridge | +| **SqsModel** | Lambda Event Source payload for Amazon SQS | +| **AlbModel** | Lambda Event Source payload for Amazon Application Load Balancer | +| **CloudwatchLogsModel** | Lambda Event Source payload for Amazon CloudWatch Logs | +| **S3Model** | Lambda Event Source payload for Amazon S3 | +| **S3ObjectLambdaEvent** | Lambda Event Source payload for Amazon S3 Object Lambda | +| **KinesisDataStreamModel** | Lambda Event Source payload for Amazon Kinesis Data Streams | +| **SesModel** | Lambda Event Source payload for Amazon Simple Email Service | +| **SnsModel** | Lambda Event Source payload for Amazon Simple Notification Service | +| **APIGatewayProxyEventModel** | Lambda Event Source payload for Amazon API Gateway | +| **APIGatewayProxyEventV2Model** | Lambda Event Source payload for Amazon API Gateway v2 payload | ### extending built-in models @@ -174,7 +174,6 @@ You can extend them to include your own models, and yet have all other known fie ???+ tip For Mypy users, we only allow type override for fields where payload is injected e.g. `detail`, `body`, etc. - ```python hl_lines="16-17 28 41" title="Extending EventBridge model as an example" from aws_lambda_powertools.utilities.parser import parse, BaseModel from aws_lambda_powertools.utilities.parser.models import EventBridgeModel @@ -470,7 +469,10 @@ parse(model=UserModel, event=payload) ???+ tip "Tip: Looking to auto-generate models from JSON, YAML, JSON Schemas, OpenApi, etc?" 
Use Koudai Aono's [data model code generation tool for Pydantic](https://github.com/koxudaxi/datamodel-code-generator)

-There are number of advanced use cases well documented in Pydantic's doc such as creating [immutable models](https://pydantic-docs.helpmanual.io/usage/models/#faux-immutability), [declaring fields with dynamic values](https://pydantic-docs.helpmanual.io/usage/models/#field-with-dynamic-default-value)) e.g. UUID, and [helper functions to parse models from files, str](https://pydantic-docs.helpmanual.io/usage/models/#helper-functions), etc.
+There are a number of advanced use cases documented in Pydantic's documentation, such as creating [immutable models](https://pydantic-docs.helpmanual.io/usage/models/#faux-immutability) and [declaring fields with dynamic values](https://pydantic-docs.helpmanual.io/usage/models/#field-with-dynamic-default-value).
+
+???+ tip "Pydantic helper functions"
+    Pydantic also offers [functions](https://pydantic-docs.helpmanual.io/usage/models/#helper-functions) to parse models from files, dicts, strings, etc.

Two lesser-known use cases are model and exception serialization. Models have methods to [export them](https://pydantic-docs.helpmanual.io/usage/exporting_models/) as `dict`, `JSON`, and `JSON Schema`, and validation exceptions can be exported as JSON.

@@ -539,7 +541,7 @@ Artillery load test sample against a [hello world sample](https://github.com/aws

???+ info
    **Uncompressed package size**: 55M, **p99**: 180.3ms

-```
+```javascript
Summary report @ 14:36:07(+0200) 2020-10-23
  Scenarios launched: 10
  Scenarios completed: 10
@@ -562,7 +564,7 @@ Codes:

???+ info
    **Uncompressed package size**: 128M, **p99**: 193.1ms

-```
+```javascript
Summary report @ 14:29:23(+0200) 2020-10-23
  Scenarios launched: 10
  Scenarios completed: 10
diff --git a/docs/utilities/typing.md b/docs/utilities/typing.md
index c1b4dbad32b..a23d014afa6 100644
--- a/docs/utilities/typing.md
+++ b/docs/utilities/typing.md
@@ -3,6 +3,8 @@ title: Typing
 description: Utility
 ---

+
+
This typing utility provides static typing classes that can be used to ease development by providing IDE type hints.

![Utilities Typing](../media/utilities_typing.png)
diff --git a/docs/utilities/validation.md b/docs/utilities/validation.md
index e6ca0841d2d..ec795c99bef 100644
--- a/docs/utilities/validation.md
+++ b/docs/utilities/validation.md
@@ -134,7 +134,9 @@ Here is a sample custom EventBridge event, where we only validate what's inside

 --8<-- "docs/shared/validation_basic_jsonschema.py"
 ```

-This is quite powerful because you can use JMESPath Query language to extract records from [arrays, slice and dice](https://jmespath.org/tutorial.html#list-and-slice-projections), to [pipe expressions](https://jmespath.org/tutorial.html#pipe-expressions) and [function expressions](https://jmespath.org/tutorial.html#functions), where you'd extract what you need before validating the actual payload.
+This is quite powerful because you can use the JMESPath query language to extract records from [arrays](https://jmespath.org/tutorial.html#list-and-slice-projections) and combine [pipe](https://jmespath.org/tutorial.html#pipe-expressions) and [function expressions](https://jmespath.org/tutorial.html#functions).
+
+When combined, these features allow you to extract what you need before validating the actual payload.
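To make that concrete, here is a minimal sketch that unwraps an EventBridge event via the `detail` envelope before validating it (the schema is illustrative, not from the docs):

```python title="Example: validating only the unwrapped payload"
from aws_lambda_powertools.utilities.validation import validate

INPUT_SCHEMA = {
    "$schema": "http://json-schema.org/draft-07/schema#",
    "type": "object",
    "properties": {"message": {"type": "string"}},
    "required": ["message"],
}


def handler(event, context):
    # the JMESPath expression "detail" extracts the payload before validation;
    # validate raises SchemaValidationError if the payload doesn't conform
    validate(event=event, schema=INPUT_SCHEMA, envelope="detail")
    return "OK"
```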
### Built-in envelopes
@@ -166,16 +168,16 @@ This utility comes with built-in envelopes to easily extract the payload from po

Here is a handy table with built-in envelopes along with their JMESPath expressions in case you want to build your own.

-Envelope name | JMESPath expression
------------------------------------------------- | ---------------------------------------------------------------------------------
-**API_GATEWAY_REST** | "powertools_json(body)"
-**API_GATEWAY_HTTP** | "powertools_json(body)"
-**SQS** | "Records[*].powertools_json(body)"
-**SNS** | "Records[0].Sns.Message | powertools_json(@)"
-**EVENTBRIDGE** | "detail"
-**CLOUDWATCH_EVENTS_SCHEDULED** | "detail"
-**KINESIS_DATA_STREAM** | "Records[*].kinesis.powertools_json(powertools_base64(data))"
-**CLOUDWATCH_LOGS** | "awslogs.powertools_base64_gzip(data) | powertools_json(@).logEvents[*]"
+| Envelope name | JMESPath expression |
+| ------------------------------- | ------------------------------------------------------------- |
+| **API_GATEWAY_REST** | "powertools_json(body)" |
+| **API_GATEWAY_HTTP** | "powertools_json(body)" |
+| **SQS** | "Records[*].powertools_json(body)" |
+| **SNS** | "Records[0].Sns.Message \| powertools_json(@)" |
+| **EVENTBRIDGE** | "detail" |
+| **CLOUDWATCH_EVENTS_SCHEDULED** | "detail" |
+| **KINESIS_DATA_STREAM** | "Records[*].kinesis.powertools_json(powertools_base64(data))" |
+| **CLOUDWATCH_LOGS** | "awslogs.powertools_base64_gzip(data) \| powertools_json(@).logEvents[*]" |

## Advanced
diff --git a/examples/logger/sam/template.yaml b/examples/logger/sam/template.yaml
new file mode 100644
index 00000000000..3f702bfc041
--- /dev/null
+++ b/examples/logger/sam/template.yaml
@@ -0,0 +1,24 @@
+AWSTemplateFormatVersion: "2010-09-09"
+Transform: AWS::Serverless-2016-10-31
+Description: AWS Lambda Powertools Logger doc examples
+
+Globals:
+  Function:
+    Timeout: 5
+    Runtime: python3.9
+    Tracing: Active
+    Environment:
+      Variables:
+        POWERTOOLS_SERVICE_NAME: payment
+        LOG_LEVEL: INFO
+    Layers:
+      # Find the latest Layer version in the official documentation
+      # https://awslabs.github.io/aws-lambda-powertools-python/latest/#lambda-layer
+      - !Sub arn:aws:lambda:${AWS::Region}:017000801446:layer:AWSLambdaPowertoolsPython:21
+
+Resources:
+  LoggerLambdaHandlerExample:
+    Type: AWS::Serverless::Function
+    Properties:
+      CodeUri: ../src
+      Handler: inject_lambda_context.handler
diff --git a/examples/logger/src/append_and_remove_keys.json b/examples/logger/src/append_and_remove_keys.json
new file mode 100644
index 00000000000..a058e32c4d1
--- /dev/null
+++ b/examples/logger/src/append_and_remove_keys.json
@@ -0,0 +1,20 @@
+[
+  {
+    "level": "INFO",
+    "location": "<module>:16",
+    "message": "Name should be equal service value",
+    "name": "payment",
+    "service": "payment",
+    "timestamp": "2022-07-01 07:09:46,330+0000"
+  },
+  {
+    "level": "INFO",
+    "location": "<module>:23",
+    "message": "This will include process ID and name",
+    "name": "payment",
+    "process": "9",
+    "processName": "MainProcess",
+    "service": "payment",
+    "timestamp": "2022-07-01 07:09:46,330+0000"
+  }
+]
diff --git a/examples/logger/src/append_and_remove_keys.py b/examples/logger/src/append_and_remove_keys.py
new file mode 100644
index 00000000000..285b0312224
--- /dev/null
+++ b/examples/logger/src/append_and_remove_keys.py
@@ -0,0 +1,12 @@
+from aws_lambda_powertools import Logger
+
+logger = Logger(service="payment", name="%(name)s")
+
+logger.info("Name should be equal service value")
+
+additional_log_attributes = {"process": "%(process)d", "processName": "%(processName)s"}
+logger.append_keys(**additional_log_attributes)
+logger.info("This will include process ID and name")
+logger.remove_keys(["processName"])
+
+# further messages will not include processName
diff --git a/examples/logger/src/append_keys.py b/examples/logger/src/append_keys.py
new file mode 100644
index 00000000000..0ef9cbe0f63
--- /dev/null
+++ b/examples/logger/src/append_keys.py
@@ -0,0 +1,15 @@
+from aws_lambda_powertools import Logger
+from aws_lambda_powertools.utilities.typing import LambdaContext
+
+logger = Logger()
+
+
+def handler(event: dict, context: LambdaContext) -> str:
+    order_id = event.get("order_id")
+
+    # this will ensure order_id key always has the latest value before logging
+    # alternatively, you can use the `clear_state=True` parameter in @inject_lambda_context
+    logger.append_keys(order_id=order_id)
+    logger.info("Collecting payment")
+
+    return "hello world"
diff --git a/examples/logger/src/append_keys_extra.py b/examples/logger/src/append_keys_extra.py
new file mode 100644
index 00000000000..0c66425f775
--- /dev/null
+++ b/examples/logger/src/append_keys_extra.py
@@ -0,0 +1,11 @@
+from aws_lambda_powertools import Logger
+from aws_lambda_powertools.utilities.typing import LambdaContext
+
+logger = Logger()
+
+
+def handler(event: dict, context: LambdaContext) -> str:
+    fields = {"request_id": "1123"}
+    logger.info("Collecting payment", extra=fields)
+
+    return "hello world"
diff --git a/examples/logger/src/append_keys_extra_output.json b/examples/logger/src/append_keys_extra_output.json
new file mode 100644
index 00000000000..b25abb226a1
--- /dev/null
+++ b/examples/logger/src/append_keys_extra_output.json
@@ -0,0 +1,8 @@
+{
+  "level": "INFO",
+  "location": "collect.handler:9",
+  "message": "Collecting payment",
+  "timestamp": "2021-05-03 11:47:12,494+0200",
+  "service": "payment",
+  "request_id": "1123"
+}
diff --git a/examples/logger/src/append_keys_output.json b/examples/logger/src/append_keys_output.json
new file mode 100644
index 00000000000..1e6d38bf785
--- /dev/null
+++ b/examples/logger/src/append_keys_output.json
@@ -0,0 +1,8 @@
+{
+  "level": "INFO",
+  "location": "collect.handler:11",
+  "message": "Collecting payment",
+  "timestamp": "2021-05-03 11:47:12,494+0200",
+  "service": "payment",
+  "order_id": "order_id_value"
+}
diff --git a/examples/logger/src/append_keys_vs_extra.py b/examples/logger/src/append_keys_vs_extra.py
new file mode 100644
index 00000000000..ab67ceb6932
--- /dev/null
+++ b/examples/logger/src/append_keys_vs_extra.py
@@ -0,0 +1,28 @@
+import os
+
+import requests
+
+from aws_lambda_powertools import Logger
+
+ENDPOINT = os.getenv("PAYMENT_API", "")
+logger = Logger(service="payment")
+
+
+class PaymentError(Exception):
+    ...
+
+
+def handler(event, context):
+    logger.append_keys(payment_id="123456789")
+    charge_id = event.get("charge_id", "")
+
+    try:
+        ret = requests.post(url=f"{ENDPOINT}/collect", data={"charge_id": charge_id})
+        ret.raise_for_status()
+
+        logger.info("Charge collected successfully", extra={"charge_id": charge_id})
+        return ret.json()
+    except requests.HTTPError as e:
+        raise PaymentError(f"Unable to collect payment for charge {charge_id}") from e
+
+
+logger.info("goodbye")
diff --git a/examples/logger/src/append_keys_vs_extra_output.json b/examples/logger/src/append_keys_vs_extra_output.json
new file mode 100644
index 00000000000..444986d7714
--- /dev/null
+++ b/examples/logger/src/append_keys_vs_extra_output.json
@@ -0,0 +1,21 @@
+[
+  {
+    "level": "INFO",
+    "location": "<module>:22",
+    "message": "Charge collected successfully",
+    "timestamp": "2021-01-12 14:09:10,859",
+    "service": "payment",
+    "sampling_rate": 0.0,
+    "payment_id": "123456789",
+    "charge_id": "75edbad0-0857-4fc9-b547-6180e2f7959b"
+  },
+  {
+    "level": "INFO",
+    "location": "<module>:27",
+    "message": "goodbye",
+    "timestamp": "2021-01-12 14:09:10,860",
+    "service": "payment",
+    "sampling_rate": 0.0,
+    "payment_id": "123456789"
+  }
+]
diff --git a/examples/logger/src/bring_your_own_formatter.py b/examples/logger/src/bring_your_own_formatter.py
new file mode 100644
index 00000000000..1b85105f930
--- /dev/null
+++ b/examples/logger/src/bring_your_own_formatter.py
@@ -0,0 +1,13 @@
+from aws_lambda_powertools import Logger
+from aws_lambda_powertools.logging.formatter import LambdaPowertoolsFormatter
+
+
+class CustomFormatter(LambdaPowertoolsFormatter):
+    def serialize(self, log: dict) -> str:
+        """Serialize final structured log dict to JSON str"""
+        log["event"] = log.pop("message")  # rename message key to event
+        return self.json_serializer(log)  # use configured json serializer
+
+
+logger = Logger(service="payment", logger_formatter=CustomFormatter())
+logger.info("hello")
diff --git a/examples/logger/src/bring_your_own_formatter_from_scratch.py b/examples/logger/src/bring_your_own_formatter_from_scratch.py
new file mode 100644
index 00000000000..3088bf2a80f
--- /dev/null
+++ b/examples/logger/src/bring_your_own_formatter_from_scratch.py
@@ -0,0 +1,43 @@
+import json
+import logging
+from typing import Iterable, List, Optional
+
+from aws_lambda_powertools import Logger
+from aws_lambda_powertools.logging.formatter import BasePowertoolsFormatter
+
+
+class CustomFormatter(BasePowertoolsFormatter):
+    # default of None allows instantiating the formatter without arguments
+    def __init__(self, log_record_order: Optional[List[str]] = None, *args, **kwargs):
+        self.log_record_order = log_record_order or ["level", "location", "message", "timestamp"]
+        self.log_format = dict.fromkeys(self.log_record_order)
+        super().__init__(*args, **kwargs)
+
+    def append_keys(self, **additional_keys):
+        # also used by `inject_lambda_context` decorator
+        self.log_format.update(additional_keys)
+
+    def remove_keys(self, keys: Iterable[str]):
+        for key in keys:
+            self.log_format.pop(key, None)
+
+    def clear_state(self):
+        self.log_format = dict.fromkeys(self.log_record_order)
+
+    def format(self, record: logging.LogRecord) -> str:  # noqa: A003
+        """Format logging record as structured JSON str"""
+        return json.dumps(
+            {
+                "event": super().format(record),
+                "timestamp": self.formatTime(record),
+                "my_default_key": "test",
+                **self.log_format,
+            }
+        )
+
+
+logger = Logger(service="payment", logger_formatter=CustomFormatter())
+
+
+@logger.inject_lambda_context
+def handler(event, context):
+    logger.info("Collecting payment")
diff --git a/examples/logger/src/bring_your_own_formatter_from_scratch_output.json b/examples/logger/src/bring_your_own_formatter_from_scratch_output.json
new file mode 100644
index 00000000000..147b4c1b443
--- /dev/null
+++ b/examples/logger/src/bring_your_own_formatter_from_scratch_output.json
@@ -0,0 +1,10 @@
+{
+  "event": "Collecting payment",
+  "timestamp": "2021-05-03 11:47:12,494",
+  "my_default_key": "test",
+  "cold_start": true,
+  "lambda_function_name": "test",
+  "lambda_function_memory_size": 128,
+  "lambda_function_arn": "arn:aws:lambda:eu-west-1:12345678910:function:test",
+  "lambda_request_id": "52fdfc07-2182-154f-163f-5f0f9a621d72"
+}
diff --git a/examples/logger/src/bring_your_own_formatter_output.json b/examples/logger/src/bring_your_own_formatter_output.json
new file mode 100644
index 00000000000..19869b7b885
--- /dev/null
+++ b/examples/logger/src/bring_your_own_formatter_output.json
@@ -0,0 +1,7 @@
+{
+  "level": "INFO",
+  "location": "<module>:16",
+  "timestamp": "2021-12-30 13:41:53,413+0100",
+  "service": "payment",
+  "event": "hello"
+}
diff --git a/examples/logger/src/bring_your_own_handler.py b/examples/logger/src/bring_your_own_handler.py
new file mode 100644
index 00000000000..e70abca794f
--- /dev/null
+++ b/examples/logger/src/bring_your_own_handler.py
@@ -0,0 +1,11 @@
+import logging
+from pathlib import Path
+
+from aws_lambda_powertools import Logger
+
+log_file = Path("/tmp/log.json")
+log_file_handler = logging.FileHandler(filename=log_file)
+
+logger = Logger(service="payment", logger_handler=log_file_handler)
+
+logger.info("hello world")
diff --git a/examples/logger/src/bring_your_own_json_serializer.py b/examples/logger/src/bring_your_own_json_serializer.py
new file mode 100644
index 00000000000..204e131fb87
--- /dev/null
+++ b/examples/logger/src/bring_your_own_json_serializer.py
@@ -0,0 +1,17 @@
+import functools
+
+import orjson
+
+from aws_lambda_powertools import Logger
+
+custom_serializer = orjson.dumps
+custom_deserializer = orjson.loads
+
+logger = Logger(service="payment", json_serializer=custom_serializer, json_deserializer=custom_deserializer)
+
+# NOTE: when using parameters, you can pass a partial
+custom_serializer_with_parameters = functools.partial(orjson.dumps, option=orjson.OPT_SERIALIZE_NUMPY)
+
+logger_two = Logger(
+    service="payment", json_serializer=custom_serializer_with_parameters, json_deserializer=custom_deserializer
+)
diff --git a/examples/logger/src/clear_state.py b/examples/logger/src/clear_state.py
new file mode 100644
index 00000000000..ec842f034c1
--- /dev/null
+++ b/examples/logger/src/clear_state.py
@@ -0,0 +1,16 @@
+from aws_lambda_powertools import Logger
+from aws_lambda_powertools.utilities.typing import LambdaContext
+
+logger = Logger()
+
+
+@logger.inject_lambda_context(clear_state=True)
+def handler(event: dict, context: LambdaContext) -> str:
+    if event.get("special_key"):
+        # Should only be available in the first request log
+        # as the second request doesn't contain `special_key`
+        logger.append_keys(debugging_key="value")
+
+    logger.info("Collecting payment")
+
+    return "hello world"
diff --git a/examples/logger/src/clear_state_event_one.json b/examples/logger/src/clear_state_event_one.json
new file mode 100644
index 00000000000..0f051787013
--- /dev/null
+++ b/examples/logger/src/clear_state_event_one.json
@@ -0,0 +1,13 @@
+{
+  "level": "INFO",
+  "location": "collect.handler:10",
+  "message": "Collecting payment",
+  "timestamp": "2021-05-03 11:47:12,494+0200",
+  "service": "payment",
+  "debugging_key": "value",
"cold_start": true, + "lambda_function_name": "test", + "lambda_function_memory_size": 128, + "lambda_function_arn": "arn:aws:lambda:eu-west-1:12345678910:function:test", + "lambda_request_id": "52fdfc07-2182-154f-163f-5f0f9a621d72" +} diff --git a/examples/logger/src/clear_state_event_two.json b/examples/logger/src/clear_state_event_two.json new file mode 100644 index 00000000000..0f019adf3a5 --- /dev/null +++ b/examples/logger/src/clear_state_event_two.json @@ -0,0 +1,12 @@ +{ + "level": "INFO", + "location": "collect.handler:10", + "message": "Collecting payment", + "timestamp": "2021-05-03 11:47:12,494+0200", + "service": "payment", + "cold_start": false, + "lambda_function_name": "test", + "lambda_function_memory_size": 128, + "lambda_function_arn": "arn:aws:lambda:eu-west-1:12345678910:function:test", + "lambda_request_id": "52fdfc07-2182-154f-163f-5f0f9a621d72" +} diff --git a/examples/logger/src/cloning_logger_config.py b/examples/logger/src/cloning_logger_config.py new file mode 100644 index 00000000000..27075568ae9 --- /dev/null +++ b/examples/logger/src/cloning_logger_config.py @@ -0,0 +1,11 @@ +import logging + +from aws_lambda_powertools import Logger +from aws_lambda_powertools.logging import utils + +logger = Logger() + +external_logger = logging.getLogger() + +utils.copy_config_to_registered_loggers(source_logger=logger) +external_logger.info("test message") diff --git a/examples/logger/src/enabling_boto_logging.py b/examples/logger/src/enabling_boto_logging.py new file mode 100644 index 00000000000..cce8dc6f8e7 --- /dev/null +++ b/examples/logger/src/enabling_boto_logging.py @@ -0,0 +1,18 @@ +from typing import Dict, List + +import boto3 + +from aws_lambda_powertools import Logger +from aws_lambda_powertools.utilities.typing import LambdaContext + +boto3.set_stream_logger() +boto3.set_stream_logger("botocore") + +logger = Logger() +client = boto3.client("s3") + + +def handler(event: Dict, context: LambdaContext) -> List: + response = client.list_buckets() + + return response.get("Buckets", []) diff --git a/examples/logger/src/fake_lambda_context_for_logger.py b/examples/logger/src/fake_lambda_context_for_logger.py new file mode 100644 index 00000000000..d3b3efc98f9 --- /dev/null +++ b/examples/logger/src/fake_lambda_context_for_logger.py @@ -0,0 +1,21 @@ +from dataclasses import dataclass + +import fake_lambda_context_for_logger_module # sample module for completeness +import pytest + + +@pytest.fixture +def lambda_context(): + @dataclass + class LambdaContext: + function_name: str = "test" + memory_limit_in_mb: int = 128 + invoked_function_arn: str = "arn:aws:lambda:eu-west-1:809313241:function:test" + aws_request_id: str = "52fdfc07-2182-154f-163f-5f0f9a621d72" + + return LambdaContext() + + +def test_lambda_handler(lambda_context): + test_event = {"test": "event"} + fake_lambda_context_for_logger_module.handler(test_event, lambda_context) diff --git a/examples/logger/src/fake_lambda_context_for_logger_module.py b/examples/logger/src/fake_lambda_context_for_logger_module.py new file mode 100644 index 00000000000..fcb94f99db1 --- /dev/null +++ b/examples/logger/src/fake_lambda_context_for_logger_module.py @@ -0,0 +1,11 @@ +from aws_lambda_powertools import Logger +from aws_lambda_powertools.utilities.typing import LambdaContext + +logger = Logger() + + +@logger.inject_lambda_context +def handler(event: dict, context: LambdaContext) -> str: + logger.info("Collecting payment") + + return "hello world" diff --git a/examples/logger/src/inject_lambda_context.py 
b/examples/logger/src/inject_lambda_context.py new file mode 100644 index 00000000000..0bdf203565d --- /dev/null +++ b/examples/logger/src/inject_lambda_context.py @@ -0,0 +1,13 @@ +from aws_lambda_powertools import Logger +from aws_lambda_powertools.utilities.typing import LambdaContext + +logger = Logger() + + +@logger.inject_lambda_context +def handler(event: dict, context: LambdaContext) -> str: + logger.info("Collecting payment") + + # You can log entire objects too + logger.info({"operation": "collect_payment", "charge_id": event["charge_id"]}) + return "hello world" diff --git a/examples/logger/src/inject_lambda_context_output.json b/examples/logger/src/inject_lambda_context_output.json new file mode 100644 index 00000000000..edf2f7d6dc6 --- /dev/null +++ b/examples/logger/src/inject_lambda_context_output.json @@ -0,0 +1,29 @@ +[ + { + "level": "INFO", + "location": "collect.handler:9", + "message": "Collecting payment", + "timestamp": "2021-05-03 11:47:12,494+0200", + "service": "payment", + "cold_start": true, + "lambda_function_name": "test", + "lambda_function_memory_size": 128, + "lambda_function_arn": "arn:aws:lambda:eu-west-1:12345678910:function:test", + "lambda_request_id": "52fdfc07-2182-154f-163f-5f0f9a621d72" + }, + { + "level": "INFO", + "location": "collect.handler:12", + "message": { + "operation": "collect_payment", + "charge_id": "ch_AZFlk2345C0" + }, + "timestamp": "2021-05-03 11:47:12,494+0200", + "service": "payment", + "cold_start": true, + "lambda_function_name": "test", + "lambda_function_memory_size": 128, + "lambda_function_arn": "arn:aws:lambda:eu-west-1:12345678910:function:test", + "lambda_request_id": "52fdfc07-2182-154f-163f-5f0f9a621d72" + } +] diff --git a/examples/logger/src/log_incoming_event.py b/examples/logger/src/log_incoming_event.py new file mode 100644 index 00000000000..264a568c4ba --- /dev/null +++ b/examples/logger/src/log_incoming_event.py @@ -0,0 +1,9 @@ +from aws_lambda_powertools import Logger +from aws_lambda_powertools.utilities.typing import LambdaContext + +logger = Logger() + + +@logger.inject_lambda_context(log_event=True) +def handler(event: dict, context: LambdaContext) -> str: + return "hello world" diff --git a/examples/logger/src/logger_reuse.py b/examples/logger/src/logger_reuse.py new file mode 100644 index 00000000000..a232eadd979 --- /dev/null +++ b/examples/logger/src/logger_reuse.py @@ -0,0 +1,13 @@ +from logger_reuse_payment import inject_payment_id + +from aws_lambda_powertools import Logger +from aws_lambda_powertools.utilities.typing import LambdaContext + +logger = Logger() + + +@logger.inject_lambda_context +def handler(event: dict, context: LambdaContext) -> str: + inject_payment_id(context=event) + logger.info("Collecting payment") + return "hello world" diff --git a/examples/logger/src/logger_reuse_output.json b/examples/logger/src/logger_reuse_output.json new file mode 100644 index 00000000000..15bc6e4fa88 --- /dev/null +++ b/examples/logger/src/logger_reuse_output.json @@ -0,0 +1,13 @@ +{ + "level": "INFO", + "location": "collect.handler:12", + "message": "Collecting payment", + "timestamp": "2021-05-03 11:47:12,494+0200", + "service": "payment", + "cold_start": true, + "lambda_function_name": "test", + "lambda_function_memory_size": 128, + "lambda_function_arn": "arn:aws:lambda:eu-west-1:12345678910:function:test", + "lambda_request_id": "52fdfc07-2182-154f-163f-5f0f9a621d72", + "payment_id": "968adaae-a211-47af-bda3-eed3ca2c0ed0" +} diff --git a/examples/logger/src/logger_reuse_payment.py 
b/examples/logger/src/logger_reuse_payment.py
new file mode 100644
index 00000000000..00cad95d161
--- /dev/null
+++ b/examples/logger/src/logger_reuse_payment.py
@@ -0,0 +1,7 @@
+from aws_lambda_powertools import Logger
+
+logger = Logger()
+
+
+def inject_payment_id(context):
+    logger.append_keys(payment_id=context.get("payment_id"))
diff --git a/examples/logger/src/logging_exceptions.py b/examples/logger/src/logging_exceptions.py
new file mode 100644
index 00000000000..31df43cd663
--- /dev/null
+++ b/examples/logger/src/logging_exceptions.py
@@ -0,0 +1,18 @@
+import requests
+
+from aws_lambda_powertools import Logger
+from aws_lambda_powertools.utilities.typing import LambdaContext
+
+ENDPOINT = "http://httpbin.org/status/500"
+logger = Logger()
+
+
+def handler(event: dict, context: LambdaContext) -> str:
+    try:
+        ret = requests.get(ENDPOINT)
+        ret.raise_for_status()
+    except requests.HTTPError as e:
+        logger.exception("Received an HTTP 5xx error")
+        raise RuntimeError("Unable to fulfill request") from e
+
+    return "hello world"
diff --git a/examples/logger/src/logging_exceptions_output.json b/examples/logger/src/logging_exceptions_output.json
new file mode 100644
index 00000000000..8f3011e3a87
--- /dev/null
+++ b/examples/logger/src/logging_exceptions_output.json
@@ -0,0 +1,9 @@
+{
+  "level": "ERROR",
+  "location": "collect.handler:15",
+  "message": "Received an HTTP 5xx error",
+  "timestamp": "2021-05-03 11:47:12,494+0200",
+  "service": "payment",
+  "exception_name": "RuntimeError",
+  "exception": "Traceback (most recent call last):\n  File \"<input>\", line 2, in <module>\nRuntimeError: Unable to fulfill request"
+}
diff --git a/examples/logger/src/logging_inheritance_bad.py b/examples/logger/src/logging_inheritance_bad.py
new file mode 100644
index 00000000000..18510720d9e
--- /dev/null
+++ b/examples/logger/src/logging_inheritance_bad.py
@@ -0,0 +1,16 @@
+from logging_inheritance_module import inject_payment_id
+
+from aws_lambda_powertools import Logger
+from aws_lambda_powertools.utilities.typing import LambdaContext
+
+# NOTE: explicit service name differs from Child
+# meaning we will have two Logger instances with different state
+# and an orphan child logger that won't be able to manipulate state
+logger = Logger(service="payment")
+
+
+@logger.inject_lambda_context
+def handler(event: dict, context: LambdaContext) -> str:
+    inject_payment_id(context=event)
+
+    return "hello world"
diff --git a/examples/logger/src/logging_inheritance_good.py b/examples/logger/src/logging_inheritance_good.py
new file mode 100644
index 00000000000..f7e29d09df7
--- /dev/null
+++ b/examples/logger/src/logging_inheritance_good.py
@@ -0,0 +1,16 @@
+from logging_inheritance_module import inject_payment_id
+
+from aws_lambda_powertools import Logger
+from aws_lambda_powertools.utilities.typing import LambdaContext
+
+# NOTE: explicit service name matches any new Logger
+# because we're using the POWERTOOLS_SERVICE_NAME env var
+# but we could equally use the same string as the service value, e.g. "payment"
+logger = Logger()
+
+
+@logger.inject_lambda_context
+def handler(event: dict, context: LambdaContext) -> str:
+    inject_payment_id(context=event)
+
+    return "hello world"
diff --git a/examples/logger/src/logging_inheritance_module.py b/examples/logger/src/logging_inheritance_module.py
new file mode 100644
index 00000000000..7891a972da6
--- /dev/null
+++ b/examples/logger/src/logging_inheritance_module.py
@@ -0,0 +1,7 @@
+from aws_lambda_powertools import Logger
+
+logger = Logger(child=True)
+
+
+def inject_payment_id(context):
+    logger.append_keys(payment_id=context.get("payment_id"))
diff --git a/examples/logger/src/overriding_log_records.py b/examples/logger/src/overriding_log_records.py
new file mode 100644
index 00000000000..f32da431158
--- /dev/null
+++ b/examples/logger/src/overriding_log_records.py
@@ -0,0 +1,12 @@
+from aws_lambda_powertools import Logger
+
+date_format = "%m/%d/%Y %I:%M:%S %p"
+location_format = "[%(funcName)s] %(module)s"
+
+# override location and timestamp format
+logger = Logger(service="payment", location=location_format, datefmt=date_format)
+
+# suppress the location key with a None value
+logger_two = Logger(service="payment", location=None)
+
+logger.info("Collecting payment")
diff --git a/examples/logger/src/overriding_log_records_output.json b/examples/logger/src/overriding_log_records_output.json
new file mode 100644
index 00000000000..ba2f1dfe8d5
--- /dev/null
+++ b/examples/logger/src/overriding_log_records_output.json
@@ -0,0 +1,7 @@
+{
+  "level": "INFO",
+  "location": "[<module>] lambda_handler",
+  "message": "Collecting payment",
+  "timestamp": "02/09/2021 09:25:17 AM",
+  "service": "payment"
+}
diff --git a/examples/logger/src/powertools_formatter_setup.py b/examples/logger/src/powertools_formatter_setup.py
new file mode 100644
index 00000000000..b6f38a92bdd
--- /dev/null
+++ b/examples/logger/src/powertools_formatter_setup.py
@@ -0,0 +1,8 @@
+from aws_lambda_powertools import Logger
+from aws_lambda_powertools.logging.formatter import LambdaPowertoolsFormatter
+
+# NOTE: Check docs for all available options
+# https://awslabs.github.io/aws-lambda-powertools-python/latest/core/logger/#lambdapowertoolsformatter
+
+formatter = LambdaPowertoolsFormatter(utc=True, log_record_order=["message"])
+logger = Logger(service="example", logger_formatter=formatter)
diff --git a/examples/logger/src/remove_keys.py b/examples/logger/src/remove_keys.py
new file mode 100644
index 00000000000..763387d9399
--- /dev/null
+++ b/examples/logger/src/remove_keys.py
@@ -0,0 +1,14 @@
+from aws_lambda_powertools import Logger
+from aws_lambda_powertools.utilities.typing import LambdaContext
+
+logger = Logger()
+
+
+def handler(event: dict, context: LambdaContext) -> str:
+    logger.append_keys(sample_key="value")
+    logger.info("Collecting payment")
+
+    logger.remove_keys(["sample_key"])
+    logger.info("Collecting payment without sample key")
+
+    return "hello world"
diff --git a/examples/logger/src/remove_keys_output.json b/examples/logger/src/remove_keys_output.json
new file mode 100644
index 00000000000..4ec8740784e
--- /dev/null
+++ b/examples/logger/src/remove_keys_output.json
@@ -0,0 +1,17 @@
+[
+  {
+    "level": "INFO",
+    "location": "collect.handler:9",
+    "message": "Collecting payment",
+    "timestamp": "2021-05-03 11:47:12,494+0200",
+    "service": "payment",
+    "sample_key": "value"
+  },
+  {
+    "level": "INFO",
+    "location": "collect.handler:12",
+    "message": "Collecting payment without sample key",
+    "timestamp": "2021-05-03 11:47:12,494+0200",
+    "service": "payment"
+  }
+]
diff --git a/examples/logger/src/reordering_log_keys.py b/examples/logger/src/reordering_log_keys.py
new file mode 100644
index 00000000000..a3de53a6aed
--- /dev/null
+++ b/examples/logger/src/reordering_log_keys.py
@@ -0,0 +1,11 @@
+from aws_lambda_powertools import Logger
+
+# make message the first key
+logger = Logger(service="payment", log_record_order=["message"])
+
+# make request_id, which will be added later, the first key
+logger_two = Logger(service="order", log_record_order=["request_id"])
+logger_two.append_keys(request_id="123")
+
+logger.info("hello world")
+logger_two.info("hello universe")
diff --git a/examples/logger/src/reordering_log_keys_output.json b/examples/logger/src/reordering_log_keys_output.json
new file mode 100644
index 00000000000..c89f7cb48bd
--- /dev/null
+++ b/examples/logger/src/reordering_log_keys_output.json
@@ -0,0 +1,17 @@
+[
+  {
+    "message": "hello world",
+    "level": "INFO",
+    "location": "<module>:11",
+    "timestamp": "2022-06-24 11:25:40,143+0200",
+    "service": "payment"
+  },
+  {
+    "request_id": "123",
+    "level": "INFO",
+    "location": "<module>:12",
+    "timestamp": "2022-06-24 11:25:40,144+0200",
+    "service": "order",
+    "message": "hello universe"
+  }
+]
diff --git a/examples/logger/src/sampling_debug_logs.py b/examples/logger/src/sampling_debug_logs.py
new file mode 100644
index 00000000000..3bbb1cdb920
--- /dev/null
+++ b/examples/logger/src/sampling_debug_logs.py
@@ -0,0 +1,13 @@
+from aws_lambda_powertools import Logger
+from aws_lambda_powertools.utilities.typing import LambdaContext
+
+# Sample 10% of debug logs, i.e. 0.1
+# NOTE: this evaluation will only occur at cold start
+logger = Logger(service="payment", sample_rate=0.1)
+
+
+def handler(event: dict, context: LambdaContext):
+    logger.debug("Verifying whether order_id is present")
+    logger.info("Collecting payment")
+
+    return "hello world"
diff --git a/examples/logger/src/sampling_debug_logs_output.json b/examples/logger/src/sampling_debug_logs_output.json
new file mode 100644
index 00000000000..f216753aea1
--- /dev/null
+++ b/examples/logger/src/sampling_debug_logs_output.json
@@ -0,0 +1,28 @@
+[
+  {
+    "level": "DEBUG",
+    "location": "collect.handler:7",
+    "message": "Verifying whether order_id is present",
+    "timestamp": "2021-05-03 11:47:12,494+0200",
+    "service": "payment",
+    "cold_start": true,
+    "lambda_function_name": "test",
+    "lambda_function_memory_size": 128,
+    "lambda_function_arn": "arn:aws:lambda:eu-west-1:12345678910:function:test",
+    "lambda_request_id": "52fdfc07-2182-154f-163f-5f0f9a621d72",
+    "sampling_rate": 0.1
+  },
+  {
+    "level": "INFO",
+    "location": "collect.handler:7",
+    "message": "Collecting payment",
+    "timestamp": "2021-05-03 11:47:12,494+0200",
+    "service": "payment",
+    "cold_start": true,
+    "lambda_function_name": "test",
+    "lambda_function_memory_size": 128,
+    "lambda_function_arn": "arn:aws:lambda:eu-west-1:12345678910:function:test",
+    "lambda_request_id": "52fdfc07-2182-154f-163f-5f0f9a621d72",
+    "sampling_rate": 0.1
+  }
+]
diff --git a/examples/logger/src/set_correlation_id.py b/examples/logger/src/set_correlation_id.py
new file mode 100644
index 00000000000..3aa0bc5f2be
--- /dev/null
+++ b/examples/logger/src/set_correlation_id.py
@@ -0,0 +1,12 @@
+from aws_lambda_powertools import Logger
+from aws_lambda_powertools.utilities.typing import LambdaContext
+
+logger = Logger()
+
+
+@logger.inject_lambda_context(correlation_id_path="headers.my_request_id_header")
+def handler(event: dict, context: LambdaContext) -> str:
+    logger.debug(f"Correlation ID => 
{logger.get_correlation_id()}") + logger.info("Collecting payment") + + return "hello world" diff --git a/examples/logger/src/set_correlation_id_event.json b/examples/logger/src/set_correlation_id_event.json new file mode 100644 index 00000000000..e74f572f070 --- /dev/null +++ b/examples/logger/src/set_correlation_id_event.json @@ -0,0 +1,5 @@ +{ + "headers": { + "my_request_id_header": "correlation_id_value" + } +} diff --git a/examples/logger/src/set_correlation_id_jmespath.py b/examples/logger/src/set_correlation_id_jmespath.py new file mode 100644 index 00000000000..049bc70a957 --- /dev/null +++ b/examples/logger/src/set_correlation_id_jmespath.py @@ -0,0 +1,13 @@ +from aws_lambda_powertools import Logger +from aws_lambda_powertools.logging import correlation_paths +from aws_lambda_powertools.utilities.typing import LambdaContext + +logger = Logger() + + +@logger.inject_lambda_context(correlation_id_path=correlation_paths.API_GATEWAY_REST) +def handler(event: dict, context: LambdaContext) -> str: + logger.debug(f"Correlation ID => {logger.get_correlation_id()}") + logger.info("Collecting payment") + + return "hello world" diff --git a/examples/logger/src/set_correlation_id_jmespath_event.json b/examples/logger/src/set_correlation_id_jmespath_event.json new file mode 100644 index 00000000000..dc27e741882 --- /dev/null +++ b/examples/logger/src/set_correlation_id_jmespath_event.json @@ -0,0 +1,5 @@ +{ + "requestContext": { + "requestId": "correlation_id_value" + } +} diff --git a/examples/logger/src/set_correlation_id_jmespath_output.json b/examples/logger/src/set_correlation_id_jmespath_output.json new file mode 100644 index 00000000000..168cc238301 --- /dev/null +++ b/examples/logger/src/set_correlation_id_jmespath_output.json @@ -0,0 +1,13 @@ +{ + "level": "INFO", + "location": "collect.handler:11", + "message": "Collecting payment", + "timestamp": "2021-05-03 11:47:12,494+0200", + "service": "payment", + "cold_start": true, + "lambda_function_name": "test", + "lambda_function_memory_size": 128, + "lambda_function_arn": "arn:aws:lambda:eu-west-1:12345678910:function:test", + "lambda_request_id": "52fdfc07-2182-154f-163f-5f0f9a621d72", + "correlation_id": "correlation_id_value" +} diff --git a/examples/logger/src/set_correlation_id_method.py b/examples/logger/src/set_correlation_id_method.py new file mode 100644 index 00000000000..74eaa338df6 --- /dev/null +++ b/examples/logger/src/set_correlation_id_method.py @@ -0,0 +1,14 @@ +from aws_lambda_powertools import Logger +from aws_lambda_powertools.utilities.data_classes import APIGatewayProxyEvent +from aws_lambda_powertools.utilities.typing import LambdaContext + +logger = Logger() + + +def handler(event: dict, context: LambdaContext) -> str: + request = APIGatewayProxyEvent(event) + + logger.set_correlation_id(request.request_context.request_id) + logger.info("Collecting payment") + + return "hello world" diff --git a/examples/logger/src/set_correlation_id_method_event.json b/examples/logger/src/set_correlation_id_method_event.json new file mode 100644 index 00000000000..dc27e741882 --- /dev/null +++ b/examples/logger/src/set_correlation_id_method_event.json @@ -0,0 +1,5 @@ +{ + "requestContext": { + "requestId": "correlation_id_value" + } +} diff --git a/examples/logger/src/set_correlation_id_method_output.json b/examples/logger/src/set_correlation_id_method_output.json new file mode 100644 index 00000000000..f78d26740ae --- /dev/null +++ b/examples/logger/src/set_correlation_id_method_output.json @@ -0,0 +1,8 @@ +{ + "level": "INFO", 
+ "location": "collect.handler:13", + "message": "Collecting payment", + "timestamp": "2021-05-03 11:47:12,494+0200", + "service": "payment", + "correlation_id": "correlation_id_value" +} diff --git a/examples/logger/src/set_correlation_id_output.json b/examples/logger/src/set_correlation_id_output.json new file mode 100644 index 00000000000..23a5040ad91 --- /dev/null +++ b/examples/logger/src/set_correlation_id_output.json @@ -0,0 +1,13 @@ +{ + "level": "INFO", + "location": "collect.handler:10", + "message": "Collecting payment", + "timestamp": "2021-05-03 11:47:12,494+0200", + "service": "payment", + "cold_start": true, + "lambda_function_name": "test", + "lambda_function_memory_size": 128, + "lambda_function_arn": "arn:aws:lambda:eu-west-1:12345678910:function:test", + "lambda_request_id": "52fdfc07-2182-154f-163f-5f0f9a621d72", + "correlation_id": "correlation_id_value" +} diff --git a/examples/logger/src/setting_utc_timestamp.py b/examples/logger/src/setting_utc_timestamp.py new file mode 100644 index 00000000000..a454e216d75 --- /dev/null +++ b/examples/logger/src/setting_utc_timestamp.py @@ -0,0 +1,7 @@ +from aws_lambda_powertools import Logger + +logger = Logger(service="payment") +logger.info("Local time") + +logger_in_utc = Logger(service="order", utc=True) +logger_in_utc.info("GMT time zone") diff --git a/examples/logger/src/setting_utc_timestamp_output.json b/examples/logger/src/setting_utc_timestamp_output.json new file mode 100644 index 00000000000..80083fbf61b --- /dev/null +++ b/examples/logger/src/setting_utc_timestamp_output.json @@ -0,0 +1,16 @@ +[ + { + "level": "INFO", + "location": ":4", + "message": "Local time", + "timestamp": "2022-06-24 11:39:49,421+0200", + "service": "payment" + }, + { + "level": "INFO", + "location": ":7", + "message": "GMT time zone", + "timestamp": "2022-06-24 09:39:49,421+0100", + "service": "order" + } +] diff --git a/examples/logger/src/unserializable_values.py b/examples/logger/src/unserializable_values.py new file mode 100644 index 00000000000..9ed196827b2 --- /dev/null +++ b/examples/logger/src/unserializable_values.py @@ -0,0 +1,19 @@ +from datetime import date, datetime + +from aws_lambda_powertools import Logger + + +def custom_json_default(value: object) -> str: + if isinstance(value, (datetime, date)): + return value.isoformat() + + return f"" + + +class Unserializable: + pass + + +logger = Logger(service="payment", json_default=custom_json_default) + +logger.info({"ingestion_time": datetime.utcnow(), "serialize_me": Unserializable()}) diff --git a/examples/logger/src/unserializable_values_output.json b/examples/logger/src/unserializable_values_output.json new file mode 100644 index 00000000000..ed7770cab03 --- /dev/null +++ b/examples/logger/src/unserializable_values_output.json @@ -0,0 +1,10 @@ +{ + "level": "INFO", + "location": ":19", + "message": { + "ingestion_time": "2022-06-24T10:12:09.526365", + "serialize_me": "" + }, + "timestamp": "2022-06-24 12:12:09,526+0200", + "service": "payment" +} diff --git a/examples/metrics/sam/template.yaml b/examples/metrics/sam/template.yaml new file mode 100644 index 00000000000..154dacdfd9b --- /dev/null +++ b/examples/metrics/sam/template.yaml @@ -0,0 +1,25 @@ +AWSTemplateFormatVersion: "2010-09-09" +Transform: AWS::Serverless-2016-10-31 +Description: AWS Lambda Powertools Metrics doc examples + +Globals: + Function: + Timeout: 5 + Runtime: python3.9 + Tracing: Active + Environment: + Variables: + POWERTOOLS_SERVICE_NAME: booking + POWERTOOLS_METRICS_NAMESPACE: ServerlessAirline + + 
Layers: + # Find the latest Layer version in the official documentation + # https://awslabs.github.io/aws-lambda-powertools-python/latest/#lambda-layer + - !Sub arn:aws:lambda:${AWS::Region}:017000801446:layer:AWSLambdaPowertoolsPython:21 + +Resources: + CaptureLambdaHandlerExample: + Type: AWS::Serverless::Function + Properties: + CodeUri: ../src + Handler: capture_lambda_handler.handler diff --git a/examples/metrics/src/add_dimension.py b/examples/metrics/src/add_dimension.py new file mode 100644 index 00000000000..530768120bf --- /dev/null +++ b/examples/metrics/src/add_dimension.py @@ -0,0 +1,14 @@ +import os + +from aws_lambda_powertools import Metrics +from aws_lambda_powertools.metrics import MetricUnit +from aws_lambda_powertools.utilities.typing import LambdaContext + +STAGE = os.getenv("STAGE", "dev") +metrics = Metrics() + + +@metrics.log_metrics # ensures metrics are flushed upon request completion/failure +def lambda_handler(event: dict, context: LambdaContext): + metrics.add_dimension(name="environment", value=STAGE) + metrics.add_metric(name="SuccessfulBooking", unit=MetricUnit.Count, value=1) diff --git a/examples/metrics/src/add_metadata.py b/examples/metrics/src/add_metadata.py new file mode 100644 index 00000000000..8724cc7b6bb --- /dev/null +++ b/examples/metrics/src/add_metadata.py @@ -0,0 +1,13 @@ +from uuid import uuid4 + +from aws_lambda_powertools import Metrics +from aws_lambda_powertools.metrics import MetricUnit +from aws_lambda_powertools.utilities.typing import LambdaContext + +metrics = Metrics() + + +@metrics.log_metrics +def lambda_handler(event: dict, context: LambdaContext): + metrics.add_metric(name="SuccessfulBooking", unit=MetricUnit.Count, value=1) + metrics.add_metadata(key="booking_id", value=f"{uuid4()}") diff --git a/examples/metrics/src/add_metadata_output.json b/examples/metrics/src/add_metadata_output.json new file mode 100644 index 00000000000..017c12c2b94 --- /dev/null +++ b/examples/metrics/src/add_metadata_output.json @@ -0,0 +1,26 @@ +{ + "_aws": { + "Timestamp": 1656688250155, + "CloudWatchMetrics": [ + { + "Namespace": "ServerlessAirline", + "Dimensions": [ + [ + "service" + ] + ], + "Metrics": [ + { + "Name": "SuccessfulBooking", + "Unit": "Count" + } + ] + } + ] + }, + "service": "booking", + "booking_id": "00347014-341d-4b8e-8421-a89d3d588ab3", + "SuccessfulBooking": [ + 1.0 + ] +} diff --git a/examples/metrics/src/add_metrics.py b/examples/metrics/src/add_metrics.py new file mode 100644 index 00000000000..7e9306416ad --- /dev/null +++ b/examples/metrics/src/add_metrics.py @@ -0,0 +1,10 @@ +from aws_lambda_powertools import Metrics +from aws_lambda_powertools.metrics import MetricUnit +from aws_lambda_powertools.utilities.typing import LambdaContext + +metrics = Metrics() + + +@metrics.log_metrics # ensures metrics are flushed upon request completion/failure +def lambda_handler(event: dict, context: LambdaContext): + metrics.add_metric(name="SuccessfulBooking", unit=MetricUnit.Count, value=1) diff --git a/examples/metrics/src/add_multi_value_metrics.py b/examples/metrics/src/add_multi_value_metrics.py new file mode 100644 index 00000000000..5325976bf2a --- /dev/null +++ b/examples/metrics/src/add_multi_value_metrics.py @@ -0,0 +1,15 @@ +import os + +from aws_lambda_powertools import Metrics +from aws_lambda_powertools.metrics import MetricUnit +from aws_lambda_powertools.utilities.typing import LambdaContext + +STAGE = os.getenv("STAGE", "dev") +metrics = Metrics() + + +@metrics.log_metrics # ensures metrics are flushed upon request 
completion/failure +def lambda_handler(event: dict, context: LambdaContext): + metrics.add_dimension(name="environment", value=STAGE) + metrics.add_metric(name="TurbineReads", unit=MetricUnit.Count, value=1) + metrics.add_metric(name="TurbineReads", unit=MetricUnit.Count, value=8) diff --git a/examples/metrics/src/add_multi_value_metrics_output.json b/examples/metrics/src/add_multi_value_metrics_output.json new file mode 100644 index 00000000000..43c0d34aa96 --- /dev/null +++ b/examples/metrics/src/add_multi_value_metrics_output.json @@ -0,0 +1,28 @@ +{ + "_aws": { + "Timestamp": 1656685750622, + "CloudWatchMetrics": [ + { + "Namespace": "ServerlessAirline", + "Dimensions": [ + [ + "environment", + "service" + ] + ], + "Metrics": [ + { + "Name": "TurbineReads", + "Unit": "Count" + } + ] + } + ] + }, + "environment": "dev", + "service": "booking", + "TurbineReads": [ + 1.0, + 8.0 + ] +} diff --git a/examples/metrics/src/assert_multiple_emf_blobs.py b/examples/metrics/src/assert_multiple_emf_blobs.py new file mode 100644 index 00000000000..6ed89460788 --- /dev/null +++ b/examples/metrics/src/assert_multiple_emf_blobs.py @@ -0,0 +1,34 @@ +import json +from dataclasses import dataclass + +import assert_multiple_emf_blobs_module +import pytest + + +@pytest.fixture +def lambda_context(): + @dataclass + class LambdaContext: + function_name: str = "test" + memory_limit_in_mb: int = 128 + invoked_function_arn: str = "arn:aws:lambda:eu-west-1:809313241:function:test" + aws_request_id: str = "52fdfc07-2182-154f-163f-5f0f9a621d72" + + return LambdaContext() + + +def capture_metrics_output_multiple_emf_objects(capsys): + return [json.loads(line.strip()) for line in capsys.readouterr().out.split("\n") if line] + + +def test_log_metrics(capsys, lambda_context): + assert_multiple_emf_blobs_module.lambda_handler({}, lambda_context) + + cold_start_blob, custom_metrics_blob = capture_metrics_output_multiple_emf_objects(capsys) + + # Since `capture_cold_start_metric` is used + # we should have one JSON blob for cold start metric and one for the application + assert cold_start_blob["ColdStart"] == [1.0] + assert cold_start_blob["function_name"] == "test" + + assert "SuccessfulBooking" in custom_metrics_blob diff --git a/examples/metrics/src/assert_multiple_emf_blobs_module.py b/examples/metrics/src/assert_multiple_emf_blobs_module.py new file mode 100644 index 00000000000..37816bc7a5d --- /dev/null +++ b/examples/metrics/src/assert_multiple_emf_blobs_module.py @@ -0,0 +1,10 @@ +from aws_lambda_powertools import Metrics +from aws_lambda_powertools.metrics import MetricUnit +from aws_lambda_powertools.utilities.typing import LambdaContext + +metrics = Metrics() + + +@metrics.log_metrics(capture_cold_start_metric=True) +def lambda_handler(event: dict, context: LambdaContext): + metrics.add_metric(name="SuccessfulBooking", unit=MetricUnit.Count, value=1) diff --git a/examples/metrics/src/assert_single_emf_blob.py b/examples/metrics/src/assert_single_emf_blob.py new file mode 100644 index 00000000000..e1b191dcb42 --- /dev/null +++ b/examples/metrics/src/assert_single_emf_blob.py @@ -0,0 +1,15 @@ +import json + +import add_metrics + + +def test_log_metrics(capsys): + add_metrics.lambda_handler({}, {}) + + log = capsys.readouterr().out.strip() # remove any extra line + metrics_output = json.loads(log) # deserialize JSON str + + # THEN we should have no exceptions + # and a valid EMF object should be flushed correctly + assert "SuccessfulBooking" in log # basic string assertion in JSON str + assert "SuccessfulBooking" 
in metrics_output["_aws"]["CloudWatchMetrics"][0]["Metrics"][0]["Name"] diff --git a/examples/metrics/src/capture_cold_start_metric.py b/examples/metrics/src/capture_cold_start_metric.py new file mode 100644 index 00000000000..93468eba345 --- /dev/null +++ b/examples/metrics/src/capture_cold_start_metric.py @@ -0,0 +1,9 @@ +from aws_lambda_powertools import Metrics +from aws_lambda_powertools.utilities.typing import LambdaContext + +metrics = Metrics() + + +@metrics.log_metrics(capture_cold_start_metric=True) +def lambda_handler(event: dict, context: LambdaContext): + ... diff --git a/examples/metrics/src/capture_cold_start_metric_output.json b/examples/metrics/src/capture_cold_start_metric_output.json new file mode 100644 index 00000000000..666eb00c8f2 --- /dev/null +++ b/examples/metrics/src/capture_cold_start_metric_output.json @@ -0,0 +1,27 @@ +{ + "_aws": { + "Timestamp": 1656687493142, + "CloudWatchMetrics": [ + { + "Namespace": "ServerlessAirline", + "Dimensions": [ + [ + "function_name", + "service" + ] + ], + "Metrics": [ + { + "Name": "ColdStart", + "Unit": "Count" + } + ] + } + ] + }, + "function_name": "test", + "service": "booking", + "ColdStart": [ + 1.0 + ] +} diff --git a/examples/metrics/src/clear_metrics_in_tests.py b/examples/metrics/src/clear_metrics_in_tests.py new file mode 100644 index 00000000000..cea3879af83 --- /dev/null +++ b/examples/metrics/src/clear_metrics_in_tests.py @@ -0,0 +1,14 @@ +import pytest + +from aws_lambda_powertools import Metrics +from aws_lambda_powertools.metrics import metrics as metrics_global + + +@pytest.fixture(scope="function", autouse=True) +def reset_metric_set(): + # Clear out every metric data prior to every test + metrics = Metrics() + metrics.clear_metrics() + metrics_global.is_cold_start = True # ensure each test has cold start + metrics.clear_default_dimensions() # remove persisted default dimensions, if any + yield diff --git a/examples/metrics/src/log_metrics_output.json b/examples/metrics/src/log_metrics_output.json new file mode 100644 index 00000000000..e563e06d174 --- /dev/null +++ b/examples/metrics/src/log_metrics_output.json @@ -0,0 +1,25 @@ +{ + "_aws": { + "Timestamp": 1656686788803, + "CloudWatchMetrics": [ + { + "Namespace": "ServerlessAirline", + "Dimensions": [ + [ + "service" + ] + ], + "Metrics": [ + { + "Name": "SuccessfulBooking", + "Unit": "Count" + } + ] + } + ] + }, + "service": "booking", + "SuccessfulBooking": [ + 1.0 + ] +} diff --git a/examples/metrics/src/manual_flush.py b/examples/metrics/src/manual_flush.py new file mode 100644 index 00000000000..def0f845d08 --- /dev/null +++ b/examples/metrics/src/manual_flush.py @@ -0,0 +1,14 @@ +import json + +from aws_lambda_powertools import Metrics +from aws_lambda_powertools.metrics import MetricUnit +from aws_lambda_powertools.utilities.typing import LambdaContext + +metrics = Metrics() + + +def lambda_handler(event: dict, context: LambdaContext): + metrics.add_metric(name="SuccessfulBooking", unit=MetricUnit.Count, value=1) + your_metrics_object = metrics.serialize_metric_set() + metrics.clear_metrics() + print(json.dumps(your_metrics_object)) diff --git a/examples/metrics/src/raise_on_empty_metrics.py b/examples/metrics/src/raise_on_empty_metrics.py new file mode 100644 index 00000000000..e7df8511486 --- /dev/null +++ b/examples/metrics/src/raise_on_empty_metrics.py @@ -0,0 +1,10 @@ +from aws_lambda_powertools.metrics import Metrics +from aws_lambda_powertools.utilities.typing import LambdaContext + +metrics = Metrics() + + 
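+# When the handler adds no metrics, log_metrics raises SchemaValidationError at flush time instead of failing silently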
+@metrics.log_metrics(raise_on_empty_metrics=True) +def lambda_handler(event: dict, context: LambdaContext): + # no metrics being created will now raise SchemaValidationError + ... diff --git a/examples/metrics/src/run_tests_env_var.sh b/examples/metrics/src/run_tests_env_var.sh new file mode 100644 index 00000000000..9b520e1af9a --- /dev/null +++ b/examples/metrics/src/run_tests_env_var.sh @@ -0,0 +1 @@ +POWERTOOLS_SERVICE_NAME="booking" POWERTOOLS_METRICS_NAMESPACE="ServerlessAirline" python -m pytest diff --git a/examples/metrics/src/set_default_dimensions.py b/examples/metrics/src/set_default_dimensions.py new file mode 100644 index 00000000000..3af925a24b6 --- /dev/null +++ b/examples/metrics/src/set_default_dimensions.py @@ -0,0 +1,15 @@ +import os + +from aws_lambda_powertools import Metrics +from aws_lambda_powertools.metrics import MetricUnit +from aws_lambda_powertools.utilities.typing import LambdaContext + +STAGE = os.getenv("STAGE", "dev") +metrics = Metrics() +metrics.set_default_dimensions(environment=STAGE, another="one") + + +@metrics.log_metrics # ensures metrics are flushed upon request completion/failure +def lambda_handler(event: dict, context: LambdaContext): + metrics.add_metric(name="TurbineReads", unit=MetricUnit.Count, value=1) + metrics.add_metric(name="TurbineReads", unit=MetricUnit.Count, value=8) diff --git a/examples/metrics/src/set_default_dimensions_log_metrics.py b/examples/metrics/src/set_default_dimensions_log_metrics.py new file mode 100644 index 00000000000..8355af4b18b --- /dev/null +++ b/examples/metrics/src/set_default_dimensions_log_metrics.py @@ -0,0 +1,16 @@ +import os + +from aws_lambda_powertools import Metrics +from aws_lambda_powertools.metrics import MetricUnit +from aws_lambda_powertools.utilities.typing import LambdaContext + +STAGE = os.getenv("STAGE", "dev") +metrics = Metrics() +DEFAULT_DIMENSIONS = {"environment": STAGE, "another": "one"} + + +# ensures metrics are flushed upon request completion/failure +@metrics.log_metrics(default_dimensions=DEFAULT_DIMENSIONS) +def lambda_handler(event: dict, context: LambdaContext): + metrics.add_metric(name="TurbineReads", unit=MetricUnit.Count, value=1) + metrics.add_metric(name="TurbineReads", unit=MetricUnit.Count, value=8) diff --git a/examples/metrics/src/single_metric.py b/examples/metrics/src/single_metric.py new file mode 100644 index 00000000000..e2bf0d6ab4e --- /dev/null +++ b/examples/metrics/src/single_metric.py @@ -0,0 +1,12 @@ +import os + +from aws_lambda_powertools import single_metric +from aws_lambda_powertools.metrics import MetricUnit +from aws_lambda_powertools.utilities.typing import LambdaContext + +STAGE = os.getenv("STAGE", "dev") + + +def lambda_handler(event: dict, context: LambdaContext): + with single_metric(name="MySingleMetric", unit=MetricUnit.Count, value=1) as metric: + metric.add_dimension(name="environment", value=STAGE) diff --git a/examples/metrics/src/single_metric_output.json b/examples/metrics/src/single_metric_output.json new file mode 100644 index 00000000000..ddf3807015b --- /dev/null +++ b/examples/metrics/src/single_metric_output.json @@ -0,0 +1,27 @@ +{ + "_aws": { + "Timestamp": 1656689267834, + "CloudWatchMetrics": [ + { + "Namespace": "ServerlessAirline", + "Dimensions": [ + [ + "environment", + "service" + ] + ], + "Metrics": [ + { + "Name": "MySingleMetric", + "Unit": "Count" + } + ] + } + ] + }, + "environment": "dev", + "service": "booking", + "MySingleMetric": [ + 1.0 + ] +} diff --git a/examples/tracer/sam/template.yaml 
b/examples/tracer/sam/template.yaml new file mode 100644 index 00000000000..bda46d308b3 --- /dev/null +++ b/examples/tracer/sam/template.yaml @@ -0,0 +1,23 @@ +AWSTemplateFormatVersion: "2010-09-09" +Transform: AWS::Serverless-2016-10-31 +Description: AWS Lambda Powertools Tracer doc examples + +Globals: + Function: + Timeout: 5 + Runtime: python3.9 + Tracing: Active + Environment: + Variables: + POWERTOOLS_SERVICE_NAME: payment + Layers: + # Find the latest Layer version in the official documentation + # https://awslabs.github.io/aws-lambda-powertools-python/latest/#lambda-layer + - !Sub arn:aws:lambda:${AWS::Region}:017000801446:layer:AWSLambdaPowertoolsPython:21 + +Resources: + CaptureLambdaHandlerExample: + Type: AWS::Serverless::Function + Properties: + CodeUri: ../src + Handler: capture_lambda_handler.handler diff --git a/examples/tracer/src/capture_lambda_handler.py b/examples/tracer/src/capture_lambda_handler.py new file mode 100644 index 00000000000..f5d2c1efcea --- /dev/null +++ b/examples/tracer/src/capture_lambda_handler.py @@ -0,0 +1,15 @@ +from aws_lambda_powertools import Tracer +from aws_lambda_powertools.utilities.typing import LambdaContext + +tracer = Tracer() # Sets service via POWERTOOLS_SERVICE_NAME env var +# OR tracer = Tracer(service="example") + + +def collect_payment(charge_id: str) -> str: + return f"dummy payment collected for charge: {charge_id}" + + +@tracer.capture_lambda_handler +def handler(event: dict, context: LambdaContext) -> str: + charge_id = event.get("charge_id", "") + return collect_payment(charge_id=charge_id) diff --git a/examples/tracer/src/capture_method.py b/examples/tracer/src/capture_method.py new file mode 100644 index 00000000000..edf1ed719f4 --- /dev/null +++ b/examples/tracer/src/capture_method.py @@ -0,0 +1,16 @@ +from aws_lambda_powertools import Tracer +from aws_lambda_powertools.utilities.typing import LambdaContext + +tracer = Tracer() + + +@tracer.capture_method +def collect_payment(charge_id: str) -> str: + tracer.put_annotation(key="PaymentId", value=charge_id) + return f"dummy payment collected for charge: {charge_id}" + + +@tracer.capture_lambda_handler +def handler(event: dict, context: LambdaContext) -> str: + charge_id = event.get("charge_id", "") + return collect_payment(charge_id=charge_id) diff --git a/examples/tracer/src/capture_method_async.py b/examples/tracer/src/capture_method_async.py new file mode 100644 index 00000000000..e142ef8f163 --- /dev/null +++ b/examples/tracer/src/capture_method_async.py @@ -0,0 +1,19 @@ +import asyncio + +from aws_lambda_powertools import Tracer +from aws_lambda_powertools.utilities.typing import LambdaContext + +tracer = Tracer() + + +@tracer.capture_method +async def collect_payment(charge_id: str) -> str: + tracer.put_annotation(key="PaymentId", value=charge_id) + await asyncio.sleep(0.5) + return f"dummy payment collected for charge: {charge_id}" + + +@tracer.capture_lambda_handler +def handler(event: dict, context: LambdaContext) -> str: + charge_id = event.get("charge_id", "") + return asyncio.run(collect_payment(charge_id=charge_id)) diff --git a/examples/tracer/src/capture_method_async_concurrency.py b/examples/tracer/src/capture_method_async_concurrency.py new file mode 100644 index 00000000000..82e89070c75 --- /dev/null +++ b/examples/tracer/src/capture_method_async_concurrency.py @@ -0,0 +1,31 @@ +import asyncio + +from aws_lambda_powertools import Tracer +from aws_lambda_powertools.utilities.typing import LambdaContext + +tracer = Tracer() + + +async def 
another_async_task(): + async with tracer.provider.in_subsegment_async("## another_async_task") as subsegment: + subsegment.put_annotation(key="key", value="value") + subsegment.put_metadata(key="key", value="value", namespace="namespace") + ... + + +async def another_async_task_2(): + async with tracer.provider.in_subsegment_async("## another_async_task_2") as subsegment: + subsegment.put_annotation(key="key", value="value") + subsegment.put_metadata(key="key", value="value", namespace="namespace") + ... + + +async def collect_payment(charge_id: str) -> str: + await asyncio.gather(another_async_task(), another_async_task_2()) + return f"dummy payment collected for charge: {charge_id}" + + +@tracer.capture_lambda_handler +def handler(event: dict, context: LambdaContext) -> str: + charge_id = event.get("charge_id", "") + return asyncio.run(collect_payment(charge_id=charge_id)) diff --git a/examples/tracer/src/capture_method_context_manager.py b/examples/tracer/src/capture_method_context_manager.py new file mode 100644 index 00000000000..083443607ac --- /dev/null +++ b/examples/tracer/src/capture_method_context_manager.py @@ -0,0 +1,27 @@ +import contextlib +from collections.abc import Generator + +from aws_lambda_powertools import Logger, Tracer +from aws_lambda_powertools.utilities.typing import LambdaContext + +tracer = Tracer() +logger = Logger() + + +@contextlib.contextmanager +@tracer.capture_method +def collect_payment(charge_id: str) -> Generator[str, None, None]: + try: + yield f"dummy payment collected for charge: {charge_id}" + finally: + tracer.put_annotation(key="PaymentId", value=charge_id) + + +@tracer.capture_lambda_handler +@logger.inject_lambda_context +def handler(event: dict, context: LambdaContext) -> str: + charge_id = event.get("charge_id", "") + with collect_payment(charge_id=charge_id) as receipt_id: + logger.info(f"Processing payment collection for charge {charge_id} with receipt {receipt_id}") + + return receipt_id diff --git a/examples/tracer/src/capture_method_generators.py b/examples/tracer/src/capture_method_generators.py new file mode 100644 index 00000000000..65b87c251e8 --- /dev/null +++ b/examples/tracer/src/capture_method_generators.py @@ -0,0 +1,17 @@ +from collections.abc import Generator + +from aws_lambda_powertools import Tracer +from aws_lambda_powertools.utilities.typing import LambdaContext + +tracer = Tracer() + + +@tracer.capture_method +def collect_payment(charge_id: str) -> Generator[str, None, None]: + yield f"dummy payment collected for charge: {charge_id}" + + +@tracer.capture_lambda_handler +def handler(event: dict, context: LambdaContext) -> str: + charge_id = event.get("charge_id", "") + return next(collect_payment(charge_id=charge_id)) diff --git a/examples/tracer/src/disable_capture_error.py b/examples/tracer/src/disable_capture_error.py new file mode 100644 index 00000000000..7b7d7e6ad23 --- /dev/null +++ b/examples/tracer/src/disable_capture_error.py @@ -0,0 +1,31 @@ +import os + +import requests + +from aws_lambda_powertools import Tracer +from aws_lambda_powertools.utilities.typing import LambdaContext + +tracer = Tracer() +ENDPOINT = os.getenv("PAYMENT_API", "") + + +class PaymentError(Exception): + ... 
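+# capture_error=False keeps exception details out of the trace; the exception itself still propagates to the caller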
+ + +@tracer.capture_method(capture_error=False) +def collect_payment(charge_id: str) -> dict: + try: + ret = requests.post(url=f"{ENDPOINT}/collect", data={"charge_id": charge_id}) + ret.raise_for_status() + return ret.json() + except requests.HTTPError as e: + raise PaymentError(f"Unable to collect payment for charge {charge_id}") from e + + +@tracer.capture_lambda_handler(capture_error=False) +def handler(event: dict, context: LambdaContext) -> str: + charge_id = event.get("charge_id", "") + ret = collect_payment(charge_id=charge_id) + + return ret.get("receipt_id", "") diff --git a/examples/tracer/src/disable_capture_response.py b/examples/tracer/src/disable_capture_response.py new file mode 100644 index 00000000000..ffe8230eece --- /dev/null +++ b/examples/tracer/src/disable_capture_response.py @@ -0,0 +1,18 @@ +from aws_lambda_powertools import Logger, Tracer +from aws_lambda_powertools.utilities.typing import LambdaContext + +tracer = Tracer() +logger = Logger() + + +@tracer.capture_method(capture_response=False) +def collect_payment(charge_id: str) -> str: + tracer.put_annotation(key="PaymentId", value=charge_id) + logger.debug("Returning sensitive information....") + return f"dummy payment collected for charge: {charge_id}" + + +@tracer.capture_lambda_handler(capture_response=False) +def handler(event: dict, context: LambdaContext) -> str: + charge_id = event.get("charge_id", "") + return collect_payment(charge_id=charge_id) diff --git a/examples/tracer/src/disable_capture_response_streaming_body.py b/examples/tracer/src/disable_capture_response_streaming_body.py new file mode 100644 index 00000000000..3e458a98eb4 --- /dev/null +++ b/examples/tracer/src/disable_capture_response_streaming_body.py @@ -0,0 +1,30 @@ +import os + +import boto3 +from botocore.response import StreamingBody + +from aws_lambda_powertools import Logger, Tracer +from aws_lambda_powertools.utilities.typing import LambdaContext + +BUCKET = os.getenv("BUCKET_NAME", "") +REPORT_KEY = os.getenv("REPORT_KEY", "") + +tracer = Tracer() +logger = Logger() + +session = boto3.Session() +s3 = session.client("s3") + + +@tracer.capture_method(capture_response=False) +def fetch_payment_report(payment_id: str) -> StreamingBody: + ret = s3.get_object(Bucket=BUCKET, Key=f"{REPORT_KEY}/{payment_id}") + logger.debug("Returning streaming body from S3 object....") + return ret["Body"] + + +@tracer.capture_lambda_handler(capture_response=False) +def handler(event: dict, context: LambdaContext) -> str: + payment_id = event.get("payment_id", "") + report = fetch_payment_report(payment_id=payment_id) + return report.read().decode() diff --git a/examples/tracer/src/ignore_endpoints.py b/examples/tracer/src/ignore_endpoints.py new file mode 100644 index 00000000000..6484cfcf5b0 --- /dev/null +++ b/examples/tracer/src/ignore_endpoints.py @@ -0,0 +1,35 @@ +import os + +import requests + +from aws_lambda_powertools import Tracer +from aws_lambda_powertools.utilities.typing import LambdaContext + +ENDPOINT = os.getenv("PAYMENT_API", "") +IGNORE_URLS = ["/collect", "/refund"] + +tracer = Tracer() +tracer.ignore_endpoint(hostname=ENDPOINT, urls=IGNORE_URLS) +tracer.ignore_endpoint(hostname=f"*.{ENDPOINT}", urls=IGNORE_URLS) # `*.ENDPOINT` also matches any subdomain + +class PaymentError(Exception): + ... 
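+# Requests to the ignored hostnames/URLs above still execute normally; they are simply not traced (no subsegment is recorded)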
+ + +@tracer.capture_method(capture_error=False) +def collect_payment(charge_id: str) -> dict: + try: + ret = requests.post(url=f"{ENDPOINT}/collect", data={"charge_id": charge_id}) + ret.raise_for_status() + return ret.json() + except requests.HTTPError as e: + raise PaymentError(f"Unable to collect payment for charge {charge_id}") from e + + +@tracer.capture_lambda_handler(capture_error=False) +def handler(event: dict, context: LambdaContext) -> str: + charge_id = event.get("charge_id", "") + ret = collect_payment(charge_id=charge_id) + + return ret.get("receipt_id", "") diff --git a/examples/tracer/src/patch_modules.py b/examples/tracer/src/patch_modules.py new file mode 100644 index 00000000000..09e7092a85a --- /dev/null +++ b/examples/tracer/src/patch_modules.py @@ -0,0 +1,16 @@ +import requests + +from aws_lambda_powertools import Tracer +from aws_lambda_powertools.utilities.typing import LambdaContext + +MODULES = ["requests"] + +tracer = Tracer(patch_modules=MODULES) + + +@tracer.capture_lambda_handler +def handler(event: dict, context: LambdaContext) -> dict: + ret = requests.get("https://httpbin.org/get") + ret.raise_for_status() + + return ret.json() diff --git a/examples/tracer/src/put_trace_annotations.py b/examples/tracer/src/put_trace_annotations.py new file mode 100644 index 00000000000..0d9455c7acd --- /dev/null +++ b/examples/tracer/src/put_trace_annotations.py @@ -0,0 +1,15 @@ +from aws_lambda_powertools import Tracer +from aws_lambda_powertools.utilities.typing import LambdaContext + +tracer = Tracer() + + +def collect_payment(charge_id: str) -> str: + tracer.put_annotation(key="PaymentId", value=charge_id) + return f"dummy payment collected for charge: {charge_id}" + + +@tracer.capture_lambda_handler +def handler(event: dict, context: LambdaContext) -> str: + charge_id = event.get("charge_id", "") + return collect_payment(charge_id=charge_id) diff --git a/examples/tracer/src/put_trace_metadata.py b/examples/tracer/src/put_trace_metadata.py new file mode 100644 index 00000000000..23b6753677c --- /dev/null +++ b/examples/tracer/src/put_trace_metadata.py @@ -0,0 +1,21 @@ +from aws_lambda_powertools import Tracer +from aws_lambda_powertools.utilities.typing import LambdaContext + +tracer = Tracer() + + +def collect_payment(charge_id: str) -> str: + return f"dummy payment collected for charge: {charge_id}" + + +@tracer.capture_lambda_handler +def handler(event: dict, context: LambdaContext) -> str: + payment_context = { + "charge_id": event.get("charge_id", ""), + "merchant_id": event.get("merchant_id", ""), + "request_id": context.aws_request_id, + } + payment_context["receipt_id"] = collect_payment(charge_id=payment_context["charge_id"]) + tracer.put_metadata(key="payment_response", value=payment_context) + + return payment_context["receipt_id"] diff --git a/examples/tracer/src/sdk_escape_hatch.py b/examples/tracer/src/sdk_escape_hatch.py new file mode 100644 index 00000000000..7f046caff9d --- /dev/null +++ b/examples/tracer/src/sdk_escape_hatch.py @@ -0,0 +1,19 @@ +from aws_lambda_powertools import Tracer +from aws_lambda_powertools.utilities.typing import LambdaContext + +tracer = Tracer() + + +def collect_payment(charge_id: str) -> str: + return f"dummy payment collected for charge: {charge_id}" + + +@tracer.capture_lambda_handler +def handler(event: dict, context: LambdaContext) -> str: + charge_id = event.get("charge_id", "") + with tracer.provider.in_subsegment("## collect_payment") as subsegment: + subsegment.put_annotation(key="PaymentId", value=charge_id) + ret = 
collect_payment(charge_id=charge_id) + subsegment.put_metadata(key="payment_response", value=ret) + + return ret diff --git a/examples/tracer/src/tracer_reuse.py b/examples/tracer/src/tracer_reuse.py new file mode 100644 index 00000000000..5f12f82b714 --- /dev/null +++ b/examples/tracer/src/tracer_reuse.py @@ -0,0 +1,12 @@ +from tracer_reuse_payment import collect_payment + +from aws_lambda_powertools import Tracer +from aws_lambda_powertools.utilities.typing import LambdaContext + +tracer = Tracer() + + +@tracer.capture_lambda_handler +def handler(event: dict, context: LambdaContext) -> str: + charge_id = event.get("charge_id", "") + return collect_payment(charge_id=charge_id) diff --git a/examples/tracer/src/tracer_reuse_payment.py b/examples/tracer/src/tracer_reuse_payment.py new file mode 100644 index 00000000000..109d1cfbc53 --- /dev/null +++ b/examples/tracer/src/tracer_reuse_payment.py @@ -0,0 +1,8 @@ +from aws_lambda_powertools import Tracer + +tracer = Tracer() + + +@tracer.capture_method +def collect_payment(charge_id: str) -> str: + return f"dummy payment collected for charge: {charge_id}" diff --git a/examples/tracer/src/tracing_aiohttp.py b/examples/tracer/src/tracing_aiohttp.py new file mode 100644 index 00000000000..45fe6a46f38 --- /dev/null +++ b/examples/tracer/src/tracing_aiohttp.py @@ -0,0 +1,25 @@ +import asyncio +import os + +import aiohttp + +from aws_lambda_powertools import Tracer +from aws_lambda_powertools.tracing import aiohttp_trace_config +from aws_lambda_powertools.utilities.typing import LambdaContext + +ENDPOINT = os.getenv("PAYMENT_API", "") + +tracer = Tracer() + + +@tracer.capture_method +async def collect_payment(charge_id: str) -> dict: + async with aiohttp.ClientSession(trace_configs=[aiohttp_trace_config()]) as session: + async with session.get(f"{ENDPOINT}/collect") as resp: + return await resp.json() + + +@tracer.capture_lambda_handler +def handler(event: dict, context: LambdaContext) -> dict: + charge_id = event.get("charge_id", "") + return asyncio.run(collect_payment(charge_id=charge_id)) diff --git a/poetry.lock b/poetry.lock index f67f25b12ca..77300c37213 100644 --- a/poetry.lock +++ b/poetry.lock @@ -22,7 +22,7 @@ tests_no_zope = ["coverage[toml] (>=5.0.2)", "hypothesis", "pympler", "pytest (> [[package]] name = "aws-xray-sdk" -version = "2.9.0" +version = "2.10.0" description = "The AWS X-Ray SDK for Python (the SDK) enables Python developers to record and emit information from within their applications to the AWS X-Ray service." category = "main" optional = false @@ -30,7 +30,6 @@ python-versions = "*" [package.dependencies] botocore = ">=1.11.3" -future = "*" wrapt = "*" [[package]] @@ -243,7 +242,7 @@ toml = "*" [[package]] name = "flake8-bugbear" -version = "22.4.25" +version = "22.7.1" description = "A plugin for flake8 finding likely bugs and design problems in your program. Contains warnings that don't belong in pyflakes and pycodestyle." 
category = "dev" optional = false @@ -344,7 +343,7 @@ python-versions = "*" name = "future" version = "0.18.2" description = "Clean single-source support for Python 3 and 2" -category = "main" +category = "dev" optional = false python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*" @@ -627,8 +626,8 @@ typing-extensions = ">=4.1.0" [[package]] name = "mypy-boto3-dynamodb" -version = "1.24.0" -description = "Type annotations for boto3.DynamoDB 1.24.0 service generated with mypy-boto3-builder 7.6.1" +version = "1.24.12" +description = "Type annotations for boto3.DynamoDB 1.24.12 service generated with mypy-boto3-builder 7.7.1" category = "dev" optional = false python-versions = ">=3.6" @@ -638,8 +637,8 @@ typing-extensions = ">=4.1.0" [[package]] name = "mypy-boto3-secretsmanager" -version = "1.24.0" -description = "Type annotations for boto3.SecretsManager 1.24.0 service generated with mypy-boto3-builder 7.6.1" +version = "1.24.11.post3" +description = "Type annotations for boto3.SecretsManager 1.24.11 service generated with mypy-boto3-builder 7.7.1" category = "dev" optional = false python-versions = ">=3.6" @@ -740,6 +739,14 @@ category = "dev" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +[[package]] +name = "py-cpuinfo" +version = "8.0.0" +description = "Get CPU info with pure Python 2 & 3" +category = "dev" +optional = false +python-versions = "*" + [[package]] name = "pycodestyle" version = "2.8.0" @@ -838,6 +845,23 @@ pytest = ">=5.4.0" [package.extras] testing = ["coverage", "hypothesis (>=5.7.1)"] +[[package]] +name = "pytest-benchmark" +version = "3.4.1" +description = "A ``pytest`` fixture for benchmarking code. It will group the tests into rounds that are calibrated to the chosen timer." +category = "dev" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" + +[package.dependencies] +py-cpuinfo = "*" +pytest = ">=3.8" + +[package.extras] +aspect = ["aspectlib"] +elasticsearch = ["elasticsearch"] +histogram = ["pygal", "pygaljs"] + [[package]] name = "pytest-cov" version = "3.0.0" @@ -1101,7 +1125,7 @@ pydantic = ["pydantic", "email-validator"] [metadata] lock-version = "1.1" python-versions = "^3.6.2" -content-hash = "e457c68bd754118733c7ad1c54d389f4aa3b06164d947fae5d682566e202b776" +content-hash = "3f3f95ab1a8cf1351639687362e20bd26b784330d309f49a2f5a307682fe5879" [metadata.files] atomicwrites = [ @@ -1113,8 +1137,8 @@ attrs = [ {file = "attrs-21.2.0.tar.gz", hash = "sha256:ef6aaac3ca6cd92904cdd0d83f629a15f18053ec84e6432106f7a4d04ae4f5fb"}, ] aws-xray-sdk = [ - {file = "aws-xray-sdk-2.9.0.tar.gz", hash = "sha256:b0cd972db218d4d8f7b53ad806fc6184626b924c4997ae58fc9f2a8cd1281568"}, - {file = "aws_xray_sdk-2.9.0-py2.py3-none-any.whl", hash = "sha256:98216b3ac8281b51b59a8703f8ec561c460807d9d0679838f5c0179d381d7e58"}, + {file = "aws-xray-sdk-2.10.0.tar.gz", hash = "sha256:9b14924fd0628cf92936055864655354003f0b1acc3e1c3ffde6403d0799dd7a"}, + {file = "aws_xray_sdk-2.10.0-py2.py3-none-any.whl", hash = "sha256:7551e81a796e1a5471ebe84844c40e8edf7c218db33506d046fec61f7495eda4"}, ] bandit = [ {file = "bandit-1.7.1-py3-none-any.whl", hash = "sha256:f5acd838e59c038a159b5c621cf0f8270b279e884eadd7b782d7491c02add0d4"}, @@ -1225,8 +1249,8 @@ flake8-black = [ {file = "flake8_black-0.2.3-py3-none-any.whl", hash = "sha256:cc080ba5b3773b69ba102b6617a00cc4ecbad8914109690cfda4d565ea435d96"}, ] flake8-bugbear = [ - {file = "flake8-bugbear-22.4.25.tar.gz", hash = 
"sha256:f7c080563fca75ee6b205d06b181ecba22b802babb96b0b084cc7743d6908a55"}, - {file = "flake8_bugbear-22.4.25-py3-none-any.whl", hash = "sha256:ec374101cddf65bd7a96d393847d74e58d3b98669dbf9768344c39b6290e8bd6"}, + {file = "flake8-bugbear-22.7.1.tar.gz", hash = "sha256:e450976a07e4f9d6c043d4f72b17ec1baf717fe37f7997009c8ae58064f88305"}, + {file = "flake8_bugbear-22.7.1-py3-none-any.whl", hash = "sha256:db5d7a831ef4412a224b26c708967ff816818cabae415e76b8c58df156c4b8e5"}, ] flake8-builtins = [ {file = "flake8-builtins-1.5.3.tar.gz", hash = "sha256:09998853b2405e98e61d2ff3027c47033adbdc17f9fe44ca58443d876eb00f3b"}, @@ -1434,12 +1458,12 @@ mypy-boto3-appconfig = [ {file = "mypy_boto3_appconfig-1.24.0-py3-none-any.whl", hash = "sha256:ca53b0b9606f13257dd0feb800d36531f2eba54f46bd9db7765f69baf9583485"}, ] mypy-boto3-dynamodb = [ - {file = "mypy-boto3-dynamodb-1.24.0.tar.gz", hash = "sha256:a7de204a173dffbee972357a69bf5e59fda169a587017e0d3c5446676342aa2e"}, - {file = "mypy_boto3_dynamodb-1.24.0-py3-none-any.whl", hash = "sha256:866f0f8ae44e266ea051f57179bf40132d8e89e6fa23abab6e71421b3c0cd794"}, + {file = "mypy-boto3-dynamodb-1.24.12.tar.gz", hash = "sha256:4fc6f0f84988ae3d307a43ec31930483828b199f1179cb801238c16cd8be5901"}, + {file = "mypy_boto3_dynamodb-1.24.12-py3-none-any.whl", hash = "sha256:7ad9aa9c23a9f90d0aa0018df3a975e6c1da32f76c11aef60bf1a49cfca840cc"}, ] mypy-boto3-secretsmanager = [ - {file = "mypy-boto3-secretsmanager-1.24.0.tar.gz", hash = "sha256:6680c322df031b08ef79fcdb8ffdfb08d57d4925392f641348336926dc5c6b2e"}, - {file = "mypy_boto3_secretsmanager-1.24.0-py3-none-any.whl", hash = "sha256:7da281c49ae91e60fdbcd0015379ae4cc9dc9ff911836ee78a2652310e09f53e"}, + {file = "mypy-boto3-secretsmanager-1.24.11.post3.tar.gz", hash = "sha256:f153b3f5ff2c65664a906fb2c97a6598a57da9f1da77679dbaf541051dcff36e"}, + {file = "mypy_boto3_secretsmanager-1.24.11.post3-py3-none-any.whl", hash = "sha256:d9655d568f7fd8fe05265613b85fba55ab6e4dcd078989af1ef9f0ffe4b45019"}, ] mypy-boto3-ssm = [ {file = "mypy-boto3-ssm-1.24.0.tar.gz", hash = "sha256:bab58398947c3627a4e7610cd0f57b525c12fd1d0a6bb862400b6af0a4e684fc"}, @@ -1477,6 +1501,9 @@ py = [ {file = "py-1.11.0-py2.py3-none-any.whl", hash = "sha256:607c53218732647dff4acdfcd50cb62615cedf612e72d1724fb1a0cc6405b378"}, {file = "py-1.11.0.tar.gz", hash = "sha256:51c75c4126074b472f746a24399ad32f6053d1b34b68d2fa41e558e6f4a98719"}, ] +py-cpuinfo = [ + {file = "py-cpuinfo-8.0.0.tar.gz", hash = "sha256:5f269be0e08e33fd959de96b34cd4aeeeacac014dd8305f70eb28d06de2345c5"}, +] pycodestyle = [ {file = "pycodestyle-2.8.0-py2.py3-none-any.whl", hash = "sha256:720f8b39dde8b293825e7ff02c475f3077124006db4f440dcbc9a20b76548a20"}, {file = "pycodestyle-2.8.0.tar.gz", hash = "sha256:eddd5847ef438ea1c7870ca7eb78a9d47ce0cdb4851a5523949f2601d0cbbe7f"}, @@ -1542,6 +1569,10 @@ pytest-asyncio = [ {file = "pytest-asyncio-0.16.0.tar.gz", hash = "sha256:7496c5977ce88c34379df64a66459fe395cd05543f0a2f837016e7144391fcfb"}, {file = "pytest_asyncio-0.16.0-py3-none-any.whl", hash = "sha256:5f2a21273c47b331ae6aa5b36087047b4899e40f03f18397c0e65fa5cca54e9b"}, ] +pytest-benchmark = [ + {file = "pytest-benchmark-3.4.1.tar.gz", hash = "sha256:40e263f912de5a81d891619032983557d62a3d85843f9a9f30b98baea0cd7b47"}, + {file = "pytest_benchmark-3.4.1-py2.py3-none-any.whl", hash = "sha256:36d2b08c4882f6f997fd3126a3d6dfd70f3249cde178ed8bbc0b73db7c20f809"}, +] pytest-cov = [ {file = "pytest-cov-3.0.0.tar.gz", hash = "sha256:e7f0f5b1617d2210a2cabc266dfe2f4c75a8d32fb89eafb7ad9d06f6d076d470"}, {file = 
"pytest_cov-3.0.0-py3-none-any.whl", hash = "sha256:578d5d15ac4a25e5f961c938b85a05b09fdaae9deef3bb6de9a6e766622ca7a6"}, diff --git a/pyproject.toml b/pyproject.toml index 53b73908361..b665a760889 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "aws_lambda_powertools" -version = "1.26.2" +version = "1.26.3" description = "A suite of utilities for AWS Lambda functions to ease adopting best practices such as tracing, structured logging, custom metrics, batching, idempotency, feature flags, and more." authors = ["Amazon Web Services"] include = ["aws_lambda_powertools/py.typed", "THIRD-PARTY-LICENSES"] @@ -50,15 +50,16 @@ bandit = "^1.7.1" radon = "^5.1.0" xenon = "^0.9.0" flake8-eradicate = "^1.2.1" -flake8-bugbear = "^22.4.25" +flake8-bugbear = "^22.7.1" mkdocs-git-revision-date-plugin = "^0.3.2" mike = "^0.6.0" mypy = "^0.961" mkdocs-material = "^8.2.7" -mypy-boto3-secretsmanager = "^1.24.0" +mypy-boto3-secretsmanager = "^1.24.11" mypy-boto3-ssm = "^1.24.0" mypy-boto3-appconfig = "^1.24.0" -mypy-boto3-dynamodb = "^1.24.0" +mypy-boto3-dynamodb = "^1.24.12" +pytest-benchmark = "^3.4.1" [tool.poetry.extras] diff --git a/tests/functional/test_logger.py b/tests/functional/test_logger.py index f4a814e4913..c8b3dc61755 100644 --- a/tests/functional/test_logger.py +++ b/tests/functional/test_logger.py @@ -625,6 +625,40 @@ def handler(event, context): assert all(k in second_log for k in lambda_context_keys) +def test_logger_custom_formatter_has_standard_and_custom_keys(stdout, service_name, lambda_context): + class CustomFormatter(LambdaPowertoolsFormatter): + ... + + # GIVEN a Logger is initialized with a custom formatter + logger = Logger(service=service_name, stream=stdout, logger_formatter=CustomFormatter(), my_key="value") + + # WHEN a lambda function is decorated with logger + @logger.inject_lambda_context + def handler(event, context): + logger.info("Hello") + + handler({}, lambda_context) + + standard_keys = ( + "level", + "location", + "message", + "timestamp", + "service", + "cold_start", + "function_name", + "function_memory_size", + "function_arn", + "function_request_id", + ) + + log = capture_logging_output(stdout) + + # THEN all standard keys should be available + assert all(k in log for k in standard_keys) + assert "my_key" in log + + def test_logger_custom_handler(lambda_context, service_name, tmp_path): # GIVEN a Logger is initialized with a FileHandler log_file = tmp_path / "log.json" @@ -773,3 +807,20 @@ def handler(event, context): # THEN logger should log event received from Lambda logged_event, _ = capture_multiple_logging_statements_output(stdout) assert logged_event["message"] == lambda_event + + +def test_inject_lambda_context_with_additional_args(lambda_context, stdout, service_name): + # GIVEN Logger is initialized + logger = Logger(service=service_name, stream=stdout) + + # AND a handler that use additional parameters + @logger.inject_lambda_context + def handler(event, context, planet, str_end="."): + logger.info(f"Hello {planet}{str_end}") + + handler({}, lambda_context, "World", str_end="!") + + # THEN the decorator should included them + log = capture_logging_output(stdout) + + assert log["message"] == "Hello World!" 
diff --git a/tests/performance/conftest.py b/tests/performance/conftest.py deleted file mode 100644 index 30cb371ca87..00000000000 --- a/tests/performance/conftest.py +++ /dev/null @@ -1,18 +0,0 @@ -import time -from contextlib import contextmanager -from typing import Generator - - -@contextmanager -def timing() -> Generator: - """ "Generator to quickly time operations. It can add 5ms so take that into account in elapsed time - - Examples - -------- - - with timing() as t: - print("something") - elapsed = t() - """ - start = time.perf_counter() - yield lambda: time.perf_counter() - start # gen as lambda to calculate elapsed time diff --git a/tests/performance/test_high_level_imports.py b/tests/performance/test_high_level_imports.py index e3914b26f57..7639065dd83 100644 --- a/tests/performance/test_high_level_imports.py +++ b/tests/performance/test_high_level_imports.py @@ -1,8 +1,6 @@ import importlib -import time -from contextlib import contextmanager from types import ModuleType -from typing import Generator, Tuple +from typing import Tuple import pytest @@ -10,86 +8,88 @@ METRICS_INIT_SLA: float = 0.005 TRACER_INIT_SLA: float = 0.5 IMPORT_INIT_SLA: float = 0.035 +PARENT_PACKAGE = "aws_lambda_powertools" +TRACING_PACKAGE = "aws_lambda_powertools.tracing" +LOGGING_PACKAGE = "aws_lambda_powertools.logging" +METRICS_PACKAGE = "aws_lambda_powertools.metrics" -@contextmanager -def timing() -> Generator: - """ "Generator to quickly time operations. It can add 5ms so take that into account in elapsed time +def import_core_utilities() -> Tuple[ModuleType, ModuleType, ModuleType]: + """Dynamically imports and returns the Tracing, Logging, and Metrics modules""" + return ( + importlib.import_module(TRACING_PACKAGE), + importlib.import_module(LOGGING_PACKAGE), + importlib.import_module(METRICS_PACKAGE), + ) - Examples - -------- - with timing() as t: - print("something") - elapsed = t() - """ - start = time.perf_counter() - yield lambda: time.perf_counter() - start # gen as lambda to calculate elapsed time +@pytest.fixture(autouse=True) +def clear_cache(): + importlib.invalidate_caches() -def core_utilities() -> Tuple[ModuleType, ModuleType, ModuleType]: - """Return Tracing, Logging, and Metrics module""" - tracing = importlib.import_module("aws_lambda_powertools.tracing") - logging = importlib.import_module("aws_lambda_powertools.logging") - metrics = importlib.import_module("aws_lambda_powertools.metrics") +def import_init_tracer(): + tracing = importlib.import_module(TRACING_PACKAGE) + tracing.Tracer(disabled=True) - return tracing, logging, metrics + +def import_init_metrics(): + metrics = importlib.import_module(METRICS_PACKAGE) + metrics.Metrics() + + +def import_init_logger(): + logging = importlib.import_module(LOGGING_PACKAGE) + logging.Logger() @pytest.mark.perf -def test_import_times_ceiling(): +@pytest.mark.benchmark(group="core", disable_gc=True, warmup=False) +def test_import_times_ceiling(benchmark): # GIVEN Core utilities are imported # WHEN none are used # THEN import and any global initialization perf should be below 30ms # though we adjust to 35ms to take into account different CI machines, etc. 
# instead of re-running tests which can lead to false positives - with timing() as t: - core_utilities() - - elapsed = t() - if elapsed > IMPORT_INIT_SLA: - pytest.fail(f"High level imports should be below ${IMPORT_INIT_SLA}s: {elapsed}") + benchmark.pedantic(import_core_utilities) + stat = benchmark.stats.stats.max + if stat > IMPORT_INIT_SLA: + pytest.fail(f"High level imports should be below {IMPORT_INIT_SLA}s: {stat}") @pytest.mark.perf -def test_tracer_init(): +@pytest.mark.benchmark(group="core", disable_gc=True, warmup=False) +def test_tracer_init(benchmark): # GIVEN Tracer is initialized # WHEN default options are used # THEN initialization X-Ray SDK perf should be below 450ms # though we adjust to 500ms to take into account different CI machines, etc. # instead of re-running tests which can lead to false positives - with timing() as t: - tracing, _, _ = core_utilities() - tracing.Tracer(disabled=True) # boto3 takes ~200ms, and remaining is X-Ray SDK init - - elapsed = t() - if elapsed > TRACER_INIT_SLA: - pytest.fail(f"High level imports should be below ${TRACER_INIT_SLA}s: {elapsed}") + benchmark.pedantic(import_init_tracer) + stat = benchmark.stats.stats.max + if stat > TRACER_INIT_SLA: + pytest.fail(f"Tracer initialization should be below {TRACER_INIT_SLA}s: {stat}") @pytest.mark.perf -def test_metrics_init(): +@pytest.mark.benchmark(group="core", disable_gc=True, warmup=False) +def test_metrics_init(benchmark): # GIVEN Metrics is initialized # WHEN default options are used # THEN initialization perf should be below 5ms - with timing() as t: - _, _, metrics = core_utilities() - metrics.Metrics() - - elapsed = t() - if elapsed > METRICS_INIT_SLA: - pytest.fail(f"High level imports should be below ${METRICS_INIT_SLA}s: {elapsed}") + benchmark.pedantic(import_init_metrics) + stat = benchmark.stats.stats.max + if stat > METRICS_INIT_SLA: + pytest.fail(f"Metrics initialization should be below {METRICS_INIT_SLA}s: {stat}") @pytest.mark.perf -def test_logger_init(): +@pytest.mark.benchmark(group="core", disable_gc=True, warmup=False) +def test_logger_init(benchmark): # GIVEN Logger is initialized # WHEN default options are used # THEN initialization perf should be below 5ms - with timing() as t: - _, logging, _ = core_utilities() - logging.Logger() - - elapsed = t() - if elapsed > LOGGER_INIT_SLA: - pytest.fail(f"High level imports should be below ${LOGGER_INIT_SLA}s: {elapsed}") + benchmark.pedantic(import_init_logger) + stat = benchmark.stats.stats.max + if stat > LOGGER_INIT_SLA: + pytest.fail(f"Logger initialization should be below {LOGGER_INIT_SLA}s: {stat}")